/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

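/*
 * Software descriptor for a single PMB slot; one of these backs each
 * hardware entry via the statically sized pmb_entry_list[] below.
 */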
struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static void pmb_unmap_entry(struct pmb_entry *);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

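/*
 * Each PMB entry is accessed through a pair of memory-mapped array
 * registers; these helpers compute the address array (PMB_ADDR) and
 * data array (PMB_DATA) register addresses for a given entry index.
 */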
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

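/*
 * Claim the first free slot in the allocation bitmap. Returns the slot
 * index on success, or -ENOSPC if every entry is in use. Callers must
 * hold pmb_rwlock for writing.
 */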
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

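/*
 * Allocate and initialize the software descriptor for a vpn/ppn
 * translation, either in an explicitly requested slot or (with
 * PMB_NO_ENTRY) in the first free one. Returns an ERR_PTR() value on
 * failure. Note that this only sets up the descriptor; the hardware
 * entry is programmed separately with __set_pmb_entry().
 */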
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        spin_lock_init(&pmbe->lock);

        pmbe->vpn       = vpn;
        pmbe->ppn       = ppn;
        pmbe->flags     = flags;
        pmbe->entry     = pos;
        pmbe->size      = 0;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}

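/*
 * Return a slot to the allocation bitmap. The hardware entry itself is
 * expected to have been invalidated beforehand via __clear_pmb_entry().
 */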
static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);
        pmbe->entry = PMB_NO_ENTRY;
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        jump_to_uncached();

        pmbe->flags &= ~PMB_CACHE_MASK;
        pmbe->flags |= pmb_cache_flags();

        __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));

        back_to_cached();
}

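/*
 * Invalidate a hardware entry by clearing the V (valid) bit in both
 * the address and data array registers. As with __set_pmb_entry(),
 * the writes are performed through the uncached mapping.
 */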
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned int entry = pmbe->entry;
        unsigned long addr;

        jump_to_uncached();

        /* Clear V-bit */
        addr = mk_pmb_addr(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        addr = mk_pmb_data(entry);
        __raw_writel(__raw_readl(addr) & ~PMB_V, addr);

        back_to_cached();
}

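/* Locked wrapper for reprogramming an entry that is already live. */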
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        spin_unlock_irqrestore(&pmbe->lock, flags);
}

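/*
 * The mapping sizes the PMB supports, ordered largest first so that
 * pmb_remap() below always tries to cover the remaining area with the
 * biggest entry that still fits.
 */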
static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

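/*
 * Establish a contiguous PMB mapping of 'size' bytes from physical
 * address 'phys' to virtual address 'vaddr', splitting it across as
 * many entries as necessary. Returns the number of bytes actually
 * mapped (any sub-16MB remainder is left unmapped), or a negative
 * error code on failure.
 *
 * A hypothetical call, with purely illustrative addresses, mapping
 * 48MB of an uncached device window might look like:
 *
 *	mapped = pmb_remap(0xa8000000, 0x18000000, 3 * SZ_16M,
 *			   PAGE_KERNEL_NOCACHE);
 */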
long pmb_remap(unsigned long vaddr, unsigned long phys,
               unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long wanted;
        int pmb_flags, i;
        long err;
        u64 flags;

        flags = pgprot_val(prot);

        pmb_flags = PMB_WT | PMB_UB;

        /* Convert typical pgprot value to the PMB equivalent */
        if (flags & _PAGE_CACHABLE) {
                pmb_flags |= PMB_C;

                if ((flags & _PAGE_WT) == 0)
                        pmb_flags &= ~(PMB_WT | PMB_UB);
        }

        pmbp = NULL;
        wanted = size;

again:
        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                unsigned long flags;

                if (size < pmb_sizes[i].size)
                        continue;

                pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
                                 PMB_NO_ENTRY);
                if (IS_ERR(pmbe)) {
                        err = PTR_ERR(pmbe);
                        goto out;
                }

                spin_lock_irqsave(&pmbe->lock, flags);

                __set_pmb_entry(pmbe);

                phys    += pmb_sizes[i].size;
                vaddr   += pmb_sizes[i].size;
                size    -= pmb_sizes[i].size;

                pmbe->size = pmb_sizes[i].size;

                /*
                 * Link adjacent entries that span multiple PMB entries
                 * for easier tear-down.
                 */
                if (likely(pmbp)) {
                        spin_lock(&pmbp->lock);
                        pmbp->link = pmbe;
                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                /*
                 * Instead of trying smaller sizes on every iteration
                 * (even if we succeed in allocating space), try using
                 * pmb_sizes[i].size again.
                 */
                i--;

                spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        if (size >= SZ_16M)
                goto again;

        return wanted - size;

out:
        pmb_unmap_entry(pmbp);

        return err;
}

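/*
 * Tear down the mapping whose virtual base address is 'addr' by
 * looking up the owning entry and unwinding it, along with any
 * entries linked to it.
 */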
void pmb_unmap(unsigned long addr)
{
        struct pmb_entry *pmbe = NULL;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == addr)
                                break;
                }
        }

        read_unlock(&pmb_rwlock);

        pmb_unmap_entry(pmbe);
}

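/*
 * Walk an entry and its ->link chain, invalidating each hardware slot
 * and returning it to the allocator. Tolerates a NULL entry so that
 * error paths can call it unconditionally.
 */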
static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);

        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe);

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

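/* Test whether a PPN falls within the kernel's own memory range. */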
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

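/*
 * Returns zero when at least one boot mapping was found and
 * synchronized, and nonzero when the hardware PMB contained nothing
 * usable.
 */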
static int pmb_synchronize_mappings(void)
{
        unsigned int applied = 0;
        struct pmb_entry *pmbp = NULL;
        int i, j;

        pr_info("PMB: boot mappings:\n");

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        __raw_writel(addr_val & ~PMB_V, addr);
                        __raw_writel(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();
                        __raw_writel(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        spin_lock(&pmbp->lock);

                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * set up the entry links accordingly.
                         */
                        if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
                            (pmbe->ppn == (pmbp->ppn + pmbp->size)))
                                pmbp->link = pmbe;

                        spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                spin_unlock_irqrestore(&pmbe->lock, irqflags);

                pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
                        vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
                        (data_val & PMB_C) ? "" : "un");

                applied++;
        }

        return (applied == 0);
}

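/*
 * Early PMB initialization, run through the uncached mapping:
 * synchronize software state with whatever the boot loader (or early
 * kernel code) left in the hardware PMB before normal use begins.
 */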
int pmb_init(void)
{
        int ret;

        jump_to_uncached();

        /*
         * Sync our software copy of the PMB mappings with those in
         * hardware. The mappings in the hardware PMB were either set up
         * by the bootloader or very early on by the kernel.
         */
        ret = pmb_synchronize_mappings();
        if (unlikely(ret == 0)) {
                back_to_cached();
                return 0;
        }

        __raw_writel(0, PMB_IRMCR);

        /* Flush out the TLB */
        __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);

        back_to_cached();

        return 0;
}

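/*
 * The CPU is still in legacy 29-bit physical addressing mode when the
 * SE bit in PMB_PASCR reads back as clear.
 */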
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

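/*
 * debugfs dump of the current hardware PMB state, one line per entry,
 * read back directly from the address and data arrays.
 */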
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     sh_debugfs_root, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);

        return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
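/*
 * On the PM_EVENT_FREEZE -> PM_EVENT_ON transition (i.e. a resume from
 * hibernation), every live software entry is written back into the
 * hardware PMB, which cannot be assumed to have survived the cycle.
 */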
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        int i;

        /* Restore the PMB after a resume from hibernation */
        if (state.event == PM_EVENT_ON &&
            prev_state.event == PM_EVENT_FREEZE) {
                struct pmb_entry *pmbe;

                read_lock(&pmb_rwlock);

                for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                        if (test_bit(i, pmb_map)) {
                                pmbe = &pmb_entry_list[i];
                                set_pmb_entry(pmbe);
                        }
                }

                read_unlock(&pmb_rwlock);
        }

        prev_state = state;

        return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
        return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
        .suspend        = pmb_sysdev_suspend,
        .resume         = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
        return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif