/*
 * linux/arch/sparc/mm/leon_m.c
 *
 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 *
 * Do the SRMMU probe (page table walk) in software.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/tlbflush.h>

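/*
 * leon_flush_during_switch: when nonzero, leon_switch_mm() flushes the
 * caches on every context switch; leon_flush_needed() reports whether
 * this can be skipped.
 * srmmu_swprobe_trace: when nonzero, srmmu_swprobe() traces its page
 * table walk with printk().
 */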
int leon_flush_during_switch = 1;
int srmmu_swprobe_trace;

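/*
 * srmmu_swprobe() - walk the SRMMU page tables in software.
 * @vaddr: virtual address to translate
 * @paddr: if non-NULL, receives the calculated physical address
 *
 * Walks the context, level-1, level-2 and level-3 tables using
 * MMU-bypass loads, stopping as soon as a PTE is found.  Returns the
 * PTE's physical page field (pte & _SRMMU_PTE_PMASK_LEON), or 0 if no
 * valid translation exists.
 *
 * Illustrative use (hypothetical caller):
 *
 *	unsigned long pa;
 *
 *	if (srmmu_swprobe(addr, &pa))
 *		pr_info("vaddr %lx -> paddr %lx\n", addr, pa);
 */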
unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
{
	unsigned int ctxtbl;
	unsigned int pgd, pmd, ped;
	unsigned int ptr;
	unsigned int lvl, pte, paddrbase;
	unsigned int ctx;
	unsigned int paddr_calc;

	paddrbase = 0;

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: trace on\n");

	ctxtbl = srmmu_get_ctable_ptr();
	if (!ctxtbl) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
		return 0;
	}
	if (!_pfn_valid(PFN(ctxtbl))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO
			       "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ctxtbl));
		return 0;
	}

	ctx = srmmu_get_context();
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);

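	/*
	 * Context table entry: either a PTD pointing to the level-1
	 * (pgd) table or a PTE mapping the whole context (treated as
	 * "level 3" below).
	 */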
	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));

	if ((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
		lvl = 3;
		pte = pgd;
		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if ((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);

	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PGD_SH) & LEON_PGD_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

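	/* Level-1 (pgd) entry: a PTE at this level maps a 16 MB region. */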
	pmd = LEON_BYPASS_LOAD_PA(ptr);
	if ((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
		lvl = 2;
		pte = pmd;
		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if ((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);

	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
	if (!_pfn_valid(PFN(ptr))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ptr));
		return 0;
	}

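	/* Level-2 (pmd) entry: a PTE at this level maps a 256 KB region. */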
	ped = LEON_BYPASS_LOAD_PA(ptr);

	if ((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is entry level 1\n");
		lvl = 1;
		pte = ped;
		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if ((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);

	ptr = (ped & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

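	/* Level-3 entry: must be a PTE mapping a 4 KB page. */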
	ptr = LEON_BYPASS_LOAD_PA(ptr);
	if ((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
		lvl = 0;
		pte = ptr;
		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
	return 0;

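	/*
	 * Combine the untranslated low bits of vaddr with the physical
	 * page number from the PTE ((pte & ~0xff) << 4); how many bits
	 * are kept depends on the level at which the PTE was found.  A
	 * context-level PTE (lvl == 3) leaves the address unchanged.
	 */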
ready:
	switch (lvl) {
	case 0:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
		break;
	case 1:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
		break;
	case 2:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
		break;
	default:
	case 3:
		paddr_calc = vaddr;
		break;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
	if (paddr)
		*paddr = paddr_calc;
	return paddrbase;
}

void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush "); /* iflush */
}

void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}

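/*
 * Flush for a page in @vma whose mapping is changing: the instruction
 * cache only if the mapping is executable, the data cache always (the
 * LEON caches are virtual, see leon_flush_needed()).
 */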
void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_flags & VM_EXEC)
		leon_flush_icache_all();
	leon_flush_dcache_all();
}

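/* Flush both the instruction cache and the data cache. */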
void leon_flush_cache_all(void)
{
	__asm__ __volatile__(" flush "); /* iflush */
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}

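/*
 * Flush the entire TLB.  Address 0x400 in the MMU flush/probe ASI
 * encodes a "flush entire" operation (type 4 of the SRMMU flush
 * address format).  The virtual caches are flushed first.
 */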
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}

/* Get all cache registers (CCR, ICCR, DCCR). */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr) /* output */
			     : "i"(ASI_LEON_CACHEREGS)           /* input */
			     : "g1"                              /* clobber list */
	);
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}

/* Because the caches are virtual we need to check the cache
 * configuration to see whether flushing on every context switch can be
 * skipped: with a direct-mapped cache whose set size does not exceed
 * PAGE_SIZE, the cache index is derived entirely from the page offset,
 * so no flush is required.
 *
 * LEON2 and LEON3 report their cache configuration differently.
 */
int __init leon_flush_needed(void)
{
	int flush_needed = -1;
	unsigned int ssize, sets;
	const char *setStr[4] =
	    { "direct mapped", "2-way associative", "3-way associative",
	      "4-way associative"
	};
	/* leon 3 */
	struct leon3_cacheregs cregs;
	leon3_getCacheRegs(&cregs);
	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);

	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
	       sets > 3 ? "unknown" : setStr[sets], ssize);
	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
		/* Set size <= page size ==>
		   flush on every context switch not needed. */
		flush_needed = 0;
		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
	}
	return flush_needed;
}

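/*
 * Called on context switch: flush the TLB and, when
 * leon_flush_during_switch is set (see leon_flush_needed()), the
 * virtual caches as well.
 */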
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}