#ifndef _SPARC64_TSB_H
#define _SPARC64_TSB_H

/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
 * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
 * pointers into this table for 8K and 64K page sizes, and also a
 * comparison TAG based upon the faulting virtual address and context.
 *
 * TLB miss trap handler software does the actual lookup via something
 * of the form:
 *
 *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
 *	ldxa		[%g0] ASI_{D,I}MMU, %g6
 *	sllx		%g6, 22, %g6
 *	srlx		%g6, 22, %g6
 *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
 *	cmp		%g4, %g6
 *	bne,pn		%xcc, tsb_miss_{d,i}tlb
 *	 mov		FAULT_CODE_{D,I}TLB, %g3
 *	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
 *	retry
 *
 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
 * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
 * register, which is:
 *
 *	-------------------------------------------------
 *	| -  |  CONTEXT |  -  |    VADDR bits 63:22    |
 *	-------------------------------------------------
 *	 63 61 60      48 47 42 41                    0
 *
 * But actually, since we use per-mm TSBs, we zero out the CONTEXT
 * field.
 *
 * Like the powerpc hashtables we need to use locking in order to
 * synchronize while we update the entries.  PTE updates need locking
 * as well.
 *
 * We need to carefully choose a lock bit for the TSB entry.  We
 * choose to use bit 47 in the tag.  Also, since we never map anything
 * at page zero in context zero, we use zero as an invalid tag entry.
 * When the lock bit is set, it forces a tag comparison failure.
 */

#define TSB_TAG_LOCK_BIT	47
#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))

#define TSB_TAG_INVALID_BIT	46
#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))
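
/* The _HIGH forms are meant for 32-bit accesses to the upper half of
 * the 8-byte tag (sparc64 is big-endian, so a 32-bit load at the tag's
 * address fetches bits 63:32), hence the "- 32".  As a worked check:
 * TSB_TAG_LOCK_HIGH is 1 << 15 (0x00008000) and TSB_TAG_INVALID_HIGH
 * is 1 << 14 (0x00004000) within that high word.
 */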

#define TSB_MEMBAR	membar	#StoreStore

/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We use boot-time instruction patching to support
 * this.
 *
 * The kernel TSB is locked into the TLB by virtue of being in the
 * kernel image, so we don't play these games for swapper_tsb access.
 */
#ifndef __ASSEMBLY__
struct tsb_ldquad_phys_patch_entry {
	unsigned int	addr;
	unsigned int	sun4u_insn;
	unsigned int	sun4v_insn;
};
extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
	__tsb_ldquad_phys_patch_end;

struct tsb_phys_patch_entry {
	unsigned int	addr;
	unsigned int	insn;
};
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
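
/* For illustration only: a minimal sketch of how early boot code might
 * walk these patch tables, assuming a boot-time is_sun4v flag.  The
 * real patch-application code lives in the mm initialization code, not
 * in this header:
 *
 *	static void __init tsb_ldquad_patch(int is_sun4v)
 *	{
 *		struct tsb_ldquad_phys_patch_entry *p;
 *
 *		for (p = &__tsb_ldquad_phys_patch;
 *		     p < &__tsb_ldquad_phys_patch_end; p++) {
 *			unsigned long addr = p->addr;
 *
 *			*(unsigned int *) addr = is_sun4v ?
 *				p->sun4v_insn : p->sun4u_insn;
 *			wmb();
 *			__asm__ __volatile__("flush %0" : : "r" (addr));
 *		}
 *	}
 */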
#endif

#define TSB_LOAD_QUAD(TSB, REG)	\
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
	.previous

#define TSB_LOAD_TAG_HIGH(TSB, REG) \
661:	lduwa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_LOAD_TAG(TSB, REG) \
661:	ldxa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
661:	casa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_CAS_TAG(TSB, REG1, REG2) \
661:	casxa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_STORE(ADDR, VAL) \
661:	stxa		VAL, [ADDR] ASI_N; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
	.previous

#define TSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2; \
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR
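
/* In rough C-like pseudocode, TSB_LOCK_TAG spins until the lock bit in
 * the tag's high 32 bits is clear, then CASes the observed unlocked
 * value to TSB_TAG_LOCK_HIGH, retrying on failure (a sketch with a
 * hypothetical cas32() helper, not the generated code):
 *
 *	for (;;) {
 *		u32 old = tag_high;
 *		if (old & TSB_TAG_LOCK_HIGH)
 *			continue;
 *		if (cas32(&tag_high, old, TSB_TAG_LOCK_HIGH) == old)
 *			break;
 *	}
 *	membar	#StoreStore;
 *
 * Storing just the lock bit into the high word is safe because
 * TSB_WRITE rewrites the complete tag when it unlocks the entry.
 */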

#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	TSB_STORE(TSB, TTE); \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	TSB_STORE(TSB, TAG);

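/* Note the ordering in TSB_WRITE: the PTE half of the entry is stored
 * first, then comes a #StoreStore barrier, and only then the tag.  A
 * concurrent TSB lookup therefore never matches the new tag while the
 * old PTE is still visible, and the final tag store is also what
 * clears the lock bit set by TSB_LOCK_TAG.
 */
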
#define KTSB_LOAD_QUAD(TSB, REG) \
	ldda	[TSB] ASI_NUCLEUS_QUAD_LDD, REG;

#define KTSB_STORE(ADDR, VAL) \
	stxa	VAL, [ADDR] ASI_N;

#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	lduwa	[TSB] ASI_N, REG1;	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2; \
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	casa	[TSB] ASI_N, REG1, REG2; \
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define KTSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	stxa	TTE, [TSB] ASI_N; \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	stxa	TAG, [TSB] ASI_N;

/* Do a kernel page table walk.  Leaves the physical PTE pointer in
 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
 * VADDR will not be clobbered, but REG2 will.
 */
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
	sethi		%hi(swapper_pg_dir), REG1; \
	or		REG1, %lo(swapper_pg_dir), REG1; \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduw		[REG1 + REG2], REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

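/* Each sllx/srlx pair above extracts one index field of VADDR already
 * scaled to a byte offset (andn clears the residual low bits), and the
 * sllx-by-11 steps expand the 32-bit interior entries, which hold a
 * physical address shifted right by 11, back into full physical
 * addresses.  Roughly, in C (a sketch with illustrative variable
 * names; the same pattern applies to USER_PGTABLE_WALK_TL1 below):
 *
 *	pgd_off  = ((vaddr >> PGDIR_SHIFT) & ((1UL << PGDIR_BITS) - 1)) * 4;
 *	pmd_base = (u64) *(u32 *) (pgd_base + pgd_off) << 11;
 *	pmd_off  = ((vaddr >> PMD_SHIFT) & ((1UL << PMD_BITS) - 1)) * 4;
 *	pte_base = (u64) phys_ld32(pmd_base + pmd_off) << 11;
 *	pte_off  = ((vaddr >> PAGE_SHIFT) &
 *		    ((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)) * 8;
 *	pte_ptr  = pte_base + pte_off;
 *
 * with a zero pgd or pmd entry branching to FAIL_LABEL, and phys_ld32
 * standing in for a 32-bit load via ASI_PHYS_USE_EC.
 */
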
/* Do a user page table walk in MMU globals.  Leaves the physical PTE
 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
 * termination.  The physical base of the page tables is in PHYS_PGD,
 * which will not be modified.
 *
 * VADDR will not be clobbered, but REG1 and REG2 will.
 */
#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
 * If no entry is found, FAIL_LABEL will be branched to.  On success
 * the resulting PTE value will be left in REG1.  VADDR is preserved
 * by this routine.
 */
#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
	sethi		%hi(prom_trans), REG1; \
	or		REG1, %lo(prom_trans), REG1; \
97:	ldx		[REG1 + 0x00], REG2; \
	brz,pn		REG2, FAIL_LABEL; \
	 nop; \
	ldx		[REG1 + 0x08], REG3; \
	add		REG2, REG3, REG3; \
	cmp		REG2, VADDR; \
	bgu,pt		%xcc, 98f; \
	 cmp		VADDR, REG3; \
	bgeu,pt		%xcc, 98f; \
	 ldx		[REG1 + 0x10], REG3; \
	sub		VADDR, REG2, REG2; \
	ba,pt		%xcc, 99f; \
	 add		REG3, REG2, REG1; \
98:	ba,pt		%xcc, 97b; \
	 add		REG1, (3 * 8), REG1; \
99:

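/* In C, the scan above is roughly the following (a sketch; each
 * prom_trans[] entry is taken to be a {virt, size, data} triple of
 * 64-bit words, matching the 0x00/0x08/0x10 offsets, with a zero
 * virt terminating the table):
 *
 *	for (i = 0; prom_trans[i].virt != 0; i++) {
 *		unsigned long start = prom_trans[i].virt;
 *		unsigned long end   = start + prom_trans[i].size;
 *
 *		if (vaddr >= start && vaddr < end)
 *			return prom_trans[i].data + (vaddr - start);
 *	}
 *	goto fail;
 */
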
/* We use a 32K TSB for the whole kernel, which allows us to handle
 * about 16MB of modules and vmalloc mappings without incurring many
 * hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	\
	(KERNEL_TSB_SIZE_BYTES / 16)
#define KERNEL_TSB4M_NENTRIES	4096

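/* As a sanity check on the numbers above: 32KB at 16 bytes per
 * TAG/PTE slot gives 2048 entries, and 2048 entries each covering an
 * 8K page map the ~16MB cited in the comment.  The 4M TSB's 4096
 * entries occupy 64KB and, at one 4MB page each, can cover up to
 * 16GB.
 */
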
/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
 * and the found TTE will be left in REG1.  REG3 and REG4 must
 * be an even/odd pair of registers.
 *
 * VADDR and TAG will be preserved and not clobbered by this macro.
 */
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
	sethi		%hi(swapper_tsb), REG1; \
	or		REG1, %lo(swapper_tsb), REG1; \
	srlx		VADDR, PAGE_SHIFT, REG2; \
	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	KTSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;

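/* In C, the index computation in KERN_TSB_LOOKUP_TL1 is simply the
 * following (a sketch treating swapper_tsb as an array of 16-byte
 * tag/PTE slots; 'struct tsb_ent' is an illustrative type):
 *
 *	struct tsb_ent { u64 tag; u64 pte; };
 *	struct tsb_ent *ent = (struct tsb_ent *) swapper_tsb +
 *		((vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1));
 *	if (ent->tag == tag)
 *		pte = ent->pte;
 *
 * The sllx-by-4 scales the entry index to the 16-byte slot size, and
 * the quad load fetches tag and PTE as a single atomic 16-byte access
 * into the even/odd register pair.
 */
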
#ifndef CONFIG_DEBUG_PAGEALLOC
/* This version uses a trick: the TAG is already (VADDR >> 22), so
 * we can make use of that for the index computation.
 */
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
	sethi		%hi(swapper_4m_tsb), REG1; \
	or		REG1, %lo(swapper_4m_tsb), REG1; \
	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	KTSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;
#endif

#endif /* !(_SPARC64_TSB_H) */