/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 * Copyright 2003 (c) Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

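/*
 * Illustrative only: with the SH-4A register definitions from
 * <asm/mmu.h> (PMB_ADDR 0xf6100000, PMB_DATA 0xf7100000, PMB_E_SHIFT 8
 * -- values assumed here, check the header for your part), entry 2
 * works out to:
 *
 *	mk_pmb_addr(2) == 0xf6100000 | (2 << 8) == 0xf6100200
 *	mk_pmb_data(2) == 0xf7100000 | (2 << 8) == 0xf7100200
 */
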
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	/* find_first_zero_bit() returns NR_PMB_ENTRIES when no bit is free */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	/*
	 * Another CPU may have claimed this slot between the search and
	 * the atomic test-and-set; if so, scan again.
	 */
	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

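/*
 * pmb_alloc() claims a slot from the static pmb_entry_list[] pool above
 * and fills in the software state; nothing touches hardware until
 * set_pmb_entry().  A minimal internal usage sketch (illustrative
 * flags; pmb_remap() below is the real consumer):
 *
 *	struct pmb_entry *pmbe = pmb_alloc(virt, phys, PMB_C | PMB_SZ_16M);
 *	if (IS_ERR(pmbe))
 *		return PTR_ERR(pmbe);
 *	set_pmb_entry(pmbe);
 */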
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags)
{
	struct pmb_entry *pmbe;
	int pos;

	pos = pmb_alloc_entry();
	if (pos < 0)
		return ERR_PTR(pos);

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = 0;
	pmbe->ppn = 0;
	pmbe->flags = 0;
	pmbe->entry = 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be run uncached (from P2) for __set_pmb_entry(): it rewrites PMB
 * entries, which may back the very cached mappings being executed
 * through.
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
}

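/*
 * Worked example for __set_pmb_entry() above (illustrative values): a
 * cached, 128MB mapping of physical 0x08000000 in slot 3 amounts to
 *
 *	ctrl_outl(vpn | PMB_V, mk_pmb_addr(3));
 *	ctrl_outl(0x08000000 | PMB_C | PMB_SZ_128M | PMB_V, mk_pmb_data(3));
 */
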
static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

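/*
 * pmb_remap() splits a request greedily, largest entry size first, and
 * returns the amount actually mapped.  For example (illustrative
 * numbers), a 144MB request decomposes into one 128MB entry followed by
 * one 16MB entry:
 *
 *	mapped = pmb_remap(vaddr, phys, 0x09000000, _PAGE_CACHABLE);
 *	// mapped == 0x09000000 on success, two PMB entries consumed
 *
 * Any remainder below the 16MB granularity of the smallest PMB entry is
 * left unmapped.
 */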
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys += pmb_sizes[i].size;
		vaddr += pmb_sizes[i].size;
		size -= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

	/* 0x1000000 is 16MB, the smallest PMB entry size */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

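/*
 * Tear down a mapping previously established with pmb_remap().  @addr
 * must be the virtual base that was handed to pmb_remap(); for a
 * mapping spanning several PMB entries, every entry chained via ->link
 * is cleared and freed.
 */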
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	/*
	 * Find the in-use entry whose virtual base matches @addr.  Take
	 * care not to leave pmbe pointing at a non-matching entry when
	 * the scan falls off the end of the list.
	 */
	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map) && pmb_entry_list[i].vpn == addr) {
			pmbe = &pmb_entry_list[i];
			break;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int i;
	long size, ret;

	jump_to_uncached();

	/*
	 * Insert PMB entries for the P1 and P2 areas so that, after
	 * we've switched the MMU to 32-bit mode, the semantics of P1
	 * and P2 are the same as in 29-bit mode, e.g.
	 *
	 *	P1 - provides a cached window onto physical memory
	 *	P2 - provides an uncached window onto physical memory
	 */
	size = __MEMORY_START + __MEMORY_SIZE;

	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
	BUG_ON(ret != size);

	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
	BUG_ON(ret != size);

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

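/*
 * With debugfs mounted (typically at /sys/kernel/debug), the PMB table
 * can then be inspected from userspace; assuming sh_debugfs_root is the
 * arch's "sh" directory, that would be:
 *
 *	# cat /sys/kernel/debug/sh/pmb
 */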
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}

subsys_initcall(pmb_sysdev_init);
#endif