/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011 Paul Mundt
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

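/*
 * Helpers for computing the memory-mapped addresses of the address and
 * data array registers belonging to a given PMB entry slot.
 */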
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

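/*
 * Two entries can be merged when they describe virtually and physically
 * contiguous regions that share identical flags.
 */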
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

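/*
 * Check whether the requested virtual/physical range is already covered
 * by an existing (possibly compound) mapping.
 */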
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

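/*
 * Only the page sizes that the PMB hardware supports (see pmb_sizes[])
 * are valid for a single entry.
 */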
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

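/*
 * Bolted mappings have to fall within the P1/P2 fixed segments, which is
 * the window that the PMB is able to translate.
 */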
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

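/*
 * Grab the first free slot in the PMB bitmap; called with pmb_rwlock
 * held for writing.
 */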
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

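/*
 * Allocate and initialize a software PMB entry; 'entry' either selects a
 * specific hardware slot or, with PMB_NO_ENTRY, the first free one.
 */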
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn = vpn;
	pmbe->ppn = ppn;
	pmbe->flags = flags;
	pmbe->entry = pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry = PMB_NO_ENTRY;
	pmbe->link = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

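/*
 * Establish a bolted mapping, covering the range with the largest
 * available page sizes first. Entries that span the range are linked
 * together so the compound mapping can be torn down in one go.
 */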
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys += pmbe->size;
			vaddr += pmbe->size;
			size -= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

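/*
 * ioremap() through the PMB: carve out a suitably aligned virtual area
 * and bolt the physical range into it. Only used when PMB iomapping has
 * been enabled via the "pmb=iomap" command line option.
 */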
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

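/*
 * Tear down the mapping whose head entry matches the given virtual
 * address, following the link chain across all of its entries.
 */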
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

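/*
 * Attempt to fold a chain of linked entries into a single, larger entry
 * whenever the combined span matches one of the supported PMB page sizes.
 */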
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

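/*
 * "pmb=iomap" on the kernel command line enables routing of large
 * ioremap() requests through the PMB.
 */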
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety vpn ppn size flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" :
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif