/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>

/* Basically, most of the Spitfire vs. Cheetah madness
 * has to do with the fact that Cheetah does not support
 * IMMU flushes out of the secondary context.  Someone needs
 * to throw a south lake birthday party for the folks
 * in Microelectronics who refused to fix this shit.
 */

/* This file is meant to be read efficiently by the CPU, not humans.
 * Try not to fuck this up for anyone...
 */
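
/* Editor's note on the magic demap addresses used throughout this file,
 * per the UltraSPARC demap encoding (consistent with every use below):
 * bit 6 selects context demap vs. page demap, and bits 5:4 select the
 * context register (0 = primary, 1 = secondary, 2 = nucleus).  So
 * 0x50 = demap secondary context, 0x40 = demap primary context,
 * vaddr | 0x20 = demap one nucleus page, and vaddr | 0x10 = demap one
 * secondary-context page.
 */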
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	 mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	retl
	 flush	%g6
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

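/* Flush a batch of pending page translations.  Each vaddrs[] entry is a
 * page-aligned virtual address, with bit 0 set if the I-TLB entry must
 * be demapped in addition to the D-TLB one.  Interrupts are disabled
 * while SECONDARY_CONTEXT is borrowed for the demaps.
 */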
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	 or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	flush	%g6
	retl
	 wrpr	%g7, 0x0, %pstate

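/* Demap every page in [start, end) from the nucleus context, walking
 * the offset in %o3 from (end - start - PAGE_SIZE) down to zero.
 */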
	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	 sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	 sub	%o3, %o4, %o3
2:	retl
	 flush	%g6

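/* Slow path for __flush_tlb_mm: SECONDARY_CONTEXT does not already hold
 * the context we want to flush, so temporarily install it (with
 * interrupts disabled), demap, then restore the old value from %g2.
 */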
__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	flush	%g6
	retl
	 wrpr	%g1, 0, %pstate

/*
 * The following code flushes one PAGE_SIZE worth of the I-cache.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
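
/* __flush_icache_page turns the physical page address back into its
 * kernel linear-mapping (PAGE_OFFSET) alias and issues a flush
 * instruction for every 32 bytes of the page.
 */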
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	membar	#StoreStore
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	 flush	%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

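/* Walk the entire D-cache (16K of 32-byte lines, judging from the loop
 * bounds), reading tags four at a time.  A tag with its two low state
 * bits (DTAG_MASK) cleared compares directly against the page's
 * physical address shifted right by 11; matching lines are invalidated
 * by writing zero back through ASI_DCACHE_TAG.
 */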
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0
	clr	%o4
	srlx	%o0, 11, %o0
	sethi	%hi(1 << 14), %o2
1:	ldxa	[%o4] ASI_DCACHE_TAG, %o3	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	ldxa	[%o4] ASI_DCACHE_TAG, %g1	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	ldxa	[%o4] ASI_DCACHE_TAG, %g2	! LSU	Group	o3 available
	add	%o4, (1 << 5), %o4		! IEU0
	andn	%o3, DTAG_MASK, %o3		! IEU1
	ldxa	[%o4] ASI_DCACHE_TAG, %g3	! LSU	Group
	add	%o4, (1 << 5), %o4		! IEU0
	andn	%g1, DTAG_MASK, %g1		! IEU1
	cmp	%o0, %o3			! IEU1	Group
	be,a,pn	%xcc, dflush1			! CTI
	 sub	%o4, (4 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g1			! IEU1	Group
	andn	%g2, DTAG_MASK, %g2		! IEU0
	be,a,pn	%xcc, dflush2			! CTI
	 sub	%o4, (3 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g2			! IEU1	Group
	andn	%g3, DTAG_MASK, %g3		! IEU0
	be,a,pn	%xcc, dflush3			! CTI
	 sub	%o4, (2 << 5), %o4		! IEU0	(Group)
	cmp	%o0, %g3			! IEU1	Group
	be,a,pn	%xcc, dflush4			! CTI
	 sub	%o4, (1 << 5), %o4		! IEU0
2:	cmp	%o4, %o2			! IEU1	Group
	bne,pt	%xcc, 1b			! CTI
	 nop					! IEU0

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	 sllx	%o0, 11, %o0
	retl
	 nop

dflush1:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush2:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush3:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
dflush4:stxa	%g0, [%o4] ASI_DCACHE_TAG
	add	%o4, (1 << 5), %o4
	membar	#Sync
	ba,pt	%xcc, 2b
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

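/* Preload a translation into the D-TLB or I-TLB.  With interrupts
 * disabled, write the tag access register (%o5 = vaddr | context) and
 * push the PTE in %o2 through the data-in register, letting the MMU
 * pick a replacement entry.
 */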
	.align	32
__prefill_dtlb:
	rdpr	%pstate, %g7
	wrpr	%g7, PSTATE_IE, %pstate
	mov	TLB_TAG_ACCESS, %g1
	stxa	%o5, [%g1] ASI_DMMU
	stxa	%o2, [%g0] ASI_DTLB_DATA_IN
	flush	%g6
	retl
	 wrpr	%g7, %pstate
__prefill_itlb:
	rdpr	%pstate, %g7
	wrpr	%g7, PSTATE_IE, %pstate
	mov	TLB_TAG_ACCESS, %g1
	stxa	%o5, [%g1] ASI_IMMU
	stxa	%o2, [%g0] ASI_ITLB_DATA_IN
	flush	%g6
	retl
	 wrpr	%g7, %pstate

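/* Called after a PTE update to preload the new translation, steering to
 * the D-TLB or I-TLB prefill above according to the fault code.
 */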
	.globl	__update_mmu_cache
__update_mmu_cache:	/* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
	srlx	%o1, PAGE_SHIFT, %o1
	andcc	%o3, FAULT_CODE_DTLB, %g0
	sllx	%o1, PAGE_SHIFT, %o5
	bne,pt	%xcc, __prefill_dtlb
	 or	%o5, %o0, %o5
	ba,a,pt	%xcc, __prefill_itlb

/* Cheetah specific versions, patched at boot time.
 *
 * The writes to the PRIMARY_CONTEXT register in this file are
 * safe even on Cheetah+ and later wrt. the page size fields.
 * The nucleus page size fields do not matter because we make
 * no data references, and these instructions execute out of a
 * locked I-TLB entry sitting in the fully associative I-TLB.
 * This sequence should also never trap.
 */
__cheetah_flush_tlb_mm: /* 15 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	flush	%g6
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

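/* The pending-batch flush, Cheetah style: the same walk as
 * __flush_tlb_pending, but done through PRIMARY_CONTEXT at %tl = 1
 * (at trap level the MMU translates through the nucleus context, so
 * temporarily rewriting PRIMARY_CONTEXT is safe here).
 */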
__cheetah_flush_tlb_pending:	/* 22 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	 andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	 nop
	stxa	%g2, [%o4] ASI_DMMU
	flush	%g6
	wrpr	%g0, 0, %tl
	retl
	 wrpr	%g7, 0x0, %pstate

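/* Cheetah can invalidate D-cache lines by physical address with a
 * store to ASI_DCACHE_INVALIDATE, so the page flush is just a loop
 * over the page at cache-line (32-byte) granularity; no tag matching
 * is needed.
 */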
#ifdef DCACHE_ALIASING_POSSIBLE
flush_dcpage_cheetah: /* 11 insns */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

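/* Copy %o2 instruction words from %o1 to %o0, flushing the I-cache for
 * each word patched.
 */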
cheetah_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	 add	%o0, 4, %o0
	retl
	 nop

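/* At boot, overwrite the generic routines in place with the Cheetah
 * versions.  The counts passed in %o2 must match the "NN insns"
 * annotations above, and each generic routine must be at least that
 * many instructions long.
 */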
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	cheetah_patch_one
	 mov	15, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	cheetah_patch_one
	 mov	22, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(flush_dcpage_cheetah), %o1
	or	%o1, %lo(flush_dcpage_cheetah), %o1
	call	cheetah_patch_one
	 mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *	%g5	mm->context	(all tlb flushes)
	 *	%g1	address arg 1	(tlb page and range flushes)
	 *	%g7	address arg 2	(tlb range flush only)
	 *
	 *	%g6	ivector table, don't touch
	 *	%g2	scratch 1
	 *	%g3	scratch 2
	 *	%g4	scratch 3
	 *
	 * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
	 */
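
	/* Because a cross-call slave already runs at %tl = 1 with
	 * interrupts disabled, these handlers need none of the
	 * pstate/%tl bookkeeping done by the routines above; they
	 * simply end with retry to resume the interrupted code.
	 */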
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:
	mov	PRIMARY_CONTEXT, %g2
	mov	0x40, %g4
	ldxa	[%g2] ASI_DMMU, %g3
	stxa	%g5, [%g2] ASI_DMMU
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry

	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	sllx	%g1, 3, %g1
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	stxa	%g5, [%g4] ASI_DMMU
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0
	be,pn	%icc, 2f
	 andn	%g5, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b
	 nop
	stxa	%g2, [%g4] ASI_DMMU
	retry

	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	 sub	%g3, %g2, %g3
	retry
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
	call	smp_synchronize_tick_client
	 nop
	clr	%l6
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	/* NOTE: This is SPECIAL!!  We do etrap/rtrap here; however, we
	 * choose to deal with the "BH's run with %pil==15" problem
	 * (described in asm/pil.h) by just invoking rtrap directly
	 * past the point where BH's are checked for.
	 *
	 * We do it like this because we do not want %pil==15
	 * lockups to prevent regs being reported.
	 */
	.globl	xcall_report_regs
xcall_report_regs:
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
	call	__show_regs
	 add	%sp, PTREGS_OFF, %o0
	clr	%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b	rtrap_xcall
	 ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

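/* Cheetah D-cache page flush, cross-call flavor: %g1 is already a
 * physical address, so just invalidate the page line by line.
 */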
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

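/* Spitfire has no invalidate-by-physical-address ASI, so walk the
 * D-cache tags from the top down and zero the ones matching this page.
 * Afterwards, if the page is mapped (%g5 != 0), flush the I-cache
 * through the page's kernel virtual alias as well.
 */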
	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	 andn	%g2, 0x3, %g2
	cmp	%g2, %g1

	bne,pt	%xcc, 2f
	 nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	 sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	 add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	.globl	xcall_promstop
xcall_promstop:
	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	 or	%g7, %lo(109b), %g7
	flushw
	call	prom_stopself
	 nop
	/* We should not return, just spin if we do... */
1:	b,a,pt	%xcc, 1b
	nop

	.data

errata32_hwbug:
	.xword	0

	.text

	/* These two are not performance critical... */
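
	/* Walk every D-TLB and I-TLB entry by index, zeroing the tag
	 * and data of each entry that is not locked (_PAGE_L).  The
	 * dummy stores to errata32_hwbug before the TLB accesses are
	 * the Spitfire Errata #32 workaround.
	 */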
	.globl	xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

	clr	%g2
	clr	%g3
1:	ldxa	[%g3] ASI_DTLB_DATA_ACCESS, %g4
	and	%g4, _PAGE_L, %g5
	brnz,pn	%g5, 2f
	 mov	TLB_TAG_ACCESS, %g7

	stxa	%g0, [%g7] ASI_DMMU
	membar	#Sync
	stxa	%g0, [%g3] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

2:	ldxa	[%g3] ASI_ITLB_DATA_ACCESS, %g4
	and	%g4, _PAGE_L, %g5
	brnz,pn	%g5, 2f
	 mov	TLB_TAG_ACCESS, %g7

	stxa	%g0, [%g7] ASI_IMMU
	membar	#Sync
	stxa	%g0, [%g3] ASI_ITLB_DATA_ACCESS
	membar	#Sync

	/* Spitfire Errata #32 workaround. */
	sethi	%hi(errata32_hwbug), %g4
	stx	%g0, [%g4 + %lo(errata32_hwbug)]

2:	add	%g2, 1, %g2
	cmp	%g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
	ble,pt	%icc, 1b
	 sll	%g2, 3, %g3
	flush	%g6
	retry

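	/* Cheetah grew a "demap all" operation (demap address 0x80),
	 * so the whole-TLB flush collapses to two stores.
	 */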
	.globl	xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
	mov	0x80, %g2
	stxa	%g0, [%g2] ASI_DMMU_DEMAP
	stxa	%g0, [%g2] ASI_IMMU_DEMAP
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#endif /* CONFIG_SMP */