/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>

#define NR_PMB_ENTRIES	16

static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;

static struct pmb_entry pmb_init_map[] = {
	/*     vpn         ppn       flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};
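
/*
 * A worked example of the table above, for illustration: the entry
 * { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C } maps the 128MB of
 * virtual space 0x88000000-0x8fffffff cached onto physical
 * 0x08000000-0x0fffffff, preserving the P1 rule vpn == ppn + 0x80000000.
 * The P2 entries repeat the same physical ranges unbuffered and
 * write-through (PMB_UB | PMB_WT) at vpn == ppn + 0xa0000000.
 */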

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
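
/*
 * For illustration (assuming the usual SH-4A layout, where array
 * entries are spaced 0x100 apart, i.e. PMB_E_SHIFT == 8): entry 2 is
 * accessed at mk_pmb_addr(2) == PMB_ADDR | 0x200 and
 * mk_pmb_data(2) == PMB_DATA | 0x200. The address array word holds
 * vpn | PMB_V and the data array word holds ppn | flags | PMB_V, as
 * programmed by __set_pmb_entry() below.
 */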

struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;

	return pmbe;
}

void pmb_free(struct pmb_entry *pmbe)
{
	kmem_cache_free(pmb_cache, pmbe);
}
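
/*
 * Callers of pmb_alloc() are expected to follow the ERR_PTR
 * convention. A minimal sketch (the mapping values are hypothetical):
 *
 *	struct pmb_entry *pmbe;
 *
 *	pmbe = pmb_alloc(0xa4000000, 0x04000000, PMB_SZ_64M | PMB_C);
 *	if (IS_ERR(pmbe))
 *		return PTR_ERR(pmbe);
 *	...
 *	pmb_free(pmbe);
 */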

/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	/* find_first_zero_bit() returns NR_PMB_ENTRIES when the map is full */
	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_SH_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}

int set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_P2();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_P1();

	return ret;
}

void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries; P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to a
	 * reset by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_P2();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_P1();

	clear_bit(entry, &pmb_map);
}

static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}

static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
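
/*
 * pmb_remap() below walks this table largest-first, so a request is
 * decomposed greedily. For example, a 192MB (0x0c000000) request
 * becomes one 128MB entry followed by one 64MB entry; any remainder
 * below the minimum 16MB entry size is left unmapped and simply not
 * counted in the returned byte total.
 */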

long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}

void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
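
/*
 * A usage sketch for the pair above (the addresses are hypothetical,
 * not from any real board): map 144MB of physical space cached into
 * P3 and tear it down again by virtual address. Note that pmb_remap()
 * returns the number of bytes actually mapped, which may fall short
 * of the request.
 *
 *	long mapped;
 *
 *	mapped = pmb_remap(0xc0000000, 0x10000000, 0x09000000,
 *			   _PAGE_CACHABLE);
 *	if (mapped < 0x09000000)
 *		... handle the short mapping ...
 *
 *	pmb_unmap(0xc0000000);
 */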

static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
			   unsigned long flags)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	spin_lock_irq(&pmb_list_lock);

	pmbe->entry = PMB_NO_ENTRY;
	pmb_list_add(pmbe);

	spin_unlock_irq(&pmb_list_lock);
}

static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep,
			   unsigned long flags)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmb);
	spin_unlock_irq(&pmb_list_lock);
}

static int __init pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, 0,
				      pmb_cache_ctor, pmb_cache_dtor);
	BUG_ON(!pmb_cache);

	jump_to_P2();

	/*
	 * Ordering is important: P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	back_to_P1();

	return 0;
}
arch_initcall(pmb_init);

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff, sz_str,
			   (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     NULL, NULL, &pmb_debugfs_fops);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);
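
/*
 * With debugfs mounted in the conventional location, the file created
 * above appears as /sys/kernel/debug/pmb; reading it dumps one line
 * per hardware entry in the format shown in pmb_seq_show().
 */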