/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

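/*
 * Each of the NR_PMB_ENTRIES slots is programmed through a pair of
 * memory-mapped array registers: an address field based at PMB_ADDR and
 * a data field based at PMB_DATA, selected by shifting the entry number
 * into the PMB_E field.
 */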
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

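/*
 * Grab the first free slot in the allocation bitmap. Callers are
 * expected to hold pmb_rwlock for writing.
 */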
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

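/*
 * Allocate and initialize a software PMB entry, either in a specific
 * slot or, with PMB_NO_ENTRY, in the first free one. Returns an
 * ERR_PTR() value when no slot is available.
 */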
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

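/*
 * Return an entry to the free pool. Callers are expected to hold
 * pmb_rwlock for writing.
 */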
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

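/*
 * Invalidate a hardware entry by clearing the V bit in both the address
 * and data arrays, using uncached writes.
 */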
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

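/*
 * Establish a bolted mapping of [vaddr, vaddr + size) to phys, greedily
 * using the largest entry sizes that still fit and linking the resulting
 * entries together so that they can be torn down as a unit.
 */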
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

again:
	for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return PTR_ERR(pmbe);
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys	+= pmbe->size;
		vaddr	+= pmbe->size;
		size	-= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return 0;
}

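/*
 * Large remapping path, only taken when "pmb=iomap" has been passed on
 * the kernel command line. The physical range is aligned to the selected
 * PMB entry size, a virtual area is reserved between the end of the
 * uncached mapping and P3SEG, and the mapping is then bolted in.
 */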
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (ret != 0)
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)orig_addr);
}

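/*
 * Tear down the compound mapping whose first entry matches the given
 * virtual address.
 */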
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

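/*
 * __pmb_unmap_entry() expects pmb_rwlock to be held for writing, while
 * pmb_unmap_entry() below takes the lock itself. At most 'depth' linked
 * entries are torn down.
 */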
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

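/*
 * Dump the final set of boot mappings to the kernel log.
 */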
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

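/*
 * Try to collapse a chain of linked, contiguous entries into a single
 * larger one. The longest leading run of the chain that adds up to a
 * valid PMB size is folded into the head entry, and the entries it
 * replaces are unmapped.
 */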
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

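/*
 * Walk the compound mappings set up by pmb_synchronize() and attempt to
 * merge each of them into fewer, larger entries.
 */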
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

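/*
 * "pmb=iomap" on the kernel command line enables the large PMB-backed
 * remappings handled by pmb_remap_caller() above.
 */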
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

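/*
 * The CPU is in legacy 29-bit physical address mode whenever the SE bit
 * in PASCR is clear.
 */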
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

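/*
 * debugfs interface for dumping the raw hardware PMB state, one line
 * per entry.
 */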
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif