/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>

        /* Basically, most of the Spitfire vs. Cheetah madness
         * has to do with the fact that Cheetah does not support
         * IMMU flushes out of the secondary context.  Someone needs
         * to throw a south lake birthday party for the folks
         * in Microelectronics who refused to fix this shit.
         */

        /* This file is meant to be read efficiently by the CPU, not humans.
         * Try not to fuck this up for anybody else...
         */
        .text
        .align          32
        .globl          __flush_tlb_mm
__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
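        /* Fast path: if %o0 is already the installed secondary
         * context, a demap-context operation (VA 0x50 selects
         * "demap context, secondary") flushes both the D-MMU and
         * I-MMU directly; otherwise branch to the slow path below.
         */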
        ldxa            [%o1] ASI_DMMU, %g2
        cmp             %g2, %o0
        bne,pn          %icc, __spitfire_flush_tlb_mm_slow
         mov            0x50, %g3
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        retl
         flush          %g6
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop

        .align          32
        .globl          __flush_tlb_pending
__flush_tlb_pending:
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
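        /* Demap each pending virtual address, walking the array from
         * the last entry down, with interrupts disabled and %o0
         * temporarily installed as the secondary context.  Bit 0 of
         * an entry means the I-MMU must be demapped as well as the
         * D-MMU.
         */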
        rdpr            %pstate, %g7
        sllx            %o1, 3, %o1
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, %pstate
        mov             SECONDARY_CONTEXT, %o4
        ldxa            [%o4] ASI_DMMU, %g2
        stxa            %o0, [%o4] ASI_DMMU
1:      sub             %o1, (1 << 3), %o1
        ldx             [%o2 + %o1], %o3
        andcc           %o3, 1, %g0
        andn            %o3, 1, %o3
        be,pn           %icc, 2f
         or             %o3, 0x10, %o3
        stxa            %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %o1, 1b
         nop
        stxa            %g2, [%o4] ASI_DMMU
        flush           %g6
        retl
         wrpr           %g7, 0x0, %pstate
        nop
        nop
        nop
        nop

        .align          32
        .globl          __flush_tlb_kernel_range
__flush_tlb_kernel_range: /* %o0=start, %o1=end */
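        /* Walk the range one PAGE_SIZE step at a time, from the last
         * page down to %o0, demapping each page from both MMUs in the
         * nucleus context (selected by the 0x20 in the demap address).
         */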
        cmp             %o0, %o1
        be,pn           %xcc, 2f
         sethi          %hi(PAGE_SIZE), %o4
        sub             %o1, %o0, %o3
        sub             %o3, %o4, %o3
        or              %o0, 0x20, %o0          ! Nucleus
1:      stxa            %g0, [%o0 + %o3] ASI_DMMU_DEMAP
        stxa            %g0, [%o0 + %o3] ASI_IMMU_DEMAP
        membar          #Sync
        brnz,pt         %o3, 1b
         sub            %o3, %o4, %o3
2:      retl
         flush          %g6

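        /* Slow path for __flush_tlb_mm: %o0 was not the current
         * secondary context, so install it temporarily with interrupts
         * disabled, demap the context from both MMUs, then restore the
         * old secondary context value the caller saved in %g2.
         */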
__spitfire_flush_tlb_mm_slow:
        rdpr            %pstate, %g1
        wrpr            %g1, PSTATE_IE, %pstate
        stxa            %o0, [%o1] ASI_DMMU
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        flush           %g6
        stxa            %g2, [%o1] ASI_DMMU
        flush           %g6
        retl
         wrpr           %g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
        .section .kprobes.text, "ax"
        .align          32
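        /* Flush the I-cache for one page: turn the physical page
         * address into its kernel linear-mapping (PAGE_OFFSET) virtual
         * address and issue a flush for every 32-byte block of the page.
         */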
        .globl          __flush_icache_page
__flush_icache_page:    /* %o0 = phys_page */
        membar          #StoreStore
        srlx            %o0, PAGE_SHIFT, %o0
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %o0, PAGE_SHIFT, %o0
        sethi           %hi(PAGE_SIZE), %g2
        sllx            %g1, 32, %g1
        add             %o0, %g1, %o0
1:      subcc           %g2, 32, %g2
        bne,pt          %icc, 1b
         flush          %o0 + %g2
        retl
         nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

        /* This routine is Spitfire specific so the hardcoded
         * D-cache size and line-size are OK.
         */
        .align          64
        .globl          __flush_dcache_page
__flush_dcache_page:    /* %o0=kaddr, %o1=flush_icache */
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %g1, 32, %g1
        sub             %o0, %g1, %o0                   ! physical address
        srlx            %o0, 11, %o0                    ! make D-cache TAG
        sethi           %hi(1 << 14), %o2               ! D-cache size
        sub             %o2, (1 << 5), %o2              ! D-cache line size
1:      ldxa            [%o2] ASI_DCACHE_TAG, %o3       ! load D-cache TAG
        andcc           %o3, DTAG_MASK, %g0             ! Valid?
        be,pn           %xcc, 2f                        ! Nope, branch
         andn           %o3, DTAG_MASK, %o3             ! Clear valid bits
        cmp             %o3, %o0                        ! TAG match?
        bne,pt          %xcc, 2f                        ! Nope, branch
         nop
        stxa            %g0, [%o2] ASI_DCACHE_TAG       ! Invalidate TAG
        membar          #Sync
2:      brnz,pt         %o2, 1b
         sub            %o2, (1 << 5), %o2              ! D-cache line size

        /* The I-cache does not snoop local stores so we
         * better flush that too when necessary.
         */
        brnz,pt         %o1, __flush_icache_page
         sllx           %o0, 11, %o0
        retl
         nop

#endif /* DCACHE_ALIASING_POSSIBLE */

        .previous

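        /* TLB preload helpers: write the Tag Access value prepared by
         * __update_mmu_cache (%o5 = page-aligned vaddr | context) and
         * push the PTE in %o2 into the D-TLB or I-TLB through the
         * data-in register, with interrupts disabled.
         */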
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 .align 32
184__prefill_dtlb:
185 rdpr %pstate, %g7
186 wrpr %g7, PSTATE_IE, %pstate
187 mov TLB_TAG_ACCESS, %g1
188 stxa %o5, [%g1] ASI_DMMU
189 stxa %o2, [%g0] ASI_DTLB_DATA_IN
190 flush %g6
191 retl
192 wrpr %g7, %pstate
193__prefill_itlb:
194 rdpr %pstate, %g7
195 wrpr %g7, PSTATE_IE, %pstate
196 mov TLB_TAG_ACCESS, %g1
197 stxa %o5, [%g1] ASI_IMMU
198 stxa %o2, [%g0] ASI_ITLB_DATA_IN
199 flush %g6
200 retl
201 wrpr %g7, %pstate
202
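        /* Form the TLB Tag Access value (page-aligned address ORed with
         * the hardware context) in %o5, then preload either the D-TLB
         * or the I-TLB depending on the fault code.
         */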
        .globl          __update_mmu_cache
__update_mmu_cache:     /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
        srlx            %o1, PAGE_SHIFT, %o1
        andcc           %o3, FAULT_CODE_DTLB, %g0
        sllx            %o1, PAGE_SHIFT, %o5
        bne,pt          %xcc, __prefill_dtlb
         or             %o5, %o0, %o5
        ba,a,pt         %xcc, __prefill_itlb

        /* Cheetah specific versions, patched at boot time. */
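        /* Cheetah cannot perform I-MMU demaps from the secondary
         * context (see the comment at the top of this file), so the
         * context flush goes through the primary context instead: at
         * TL=1, install the target context in PRIMARY_CONTEXT while
         * preserving the nucleus page-size fields, demap the context
         * from both MMUs, then restore the old primary context.
         */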
__cheetah_flush_tlb_mm: /* 18 insns */
        rdpr            %pstate, %g7
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, 0x0, %pstate
        wrpr            %g0, 1, %tl
        mov             PRIMARY_CONTEXT, %o2
        mov             0x40, %g3
        ldxa            [%o2] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o1
        sllx            %o1, CTX_PGSZ1_NUC_SHIFT, %o1
        or              %o0, %o1, %o0   /* Preserve nucleus page size fields */
        stxa            %o0, [%o2] ASI_DMMU
        stxa            %g0, [%g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g3] ASI_IMMU_DEMAP
        stxa            %g2, [%o2] ASI_DMMU
        flush           %g6
        wrpr            %g0, 0, %tl
        retl
         wrpr           %g7, 0x0, %pstate

__cheetah_flush_tlb_pending:    /* 26 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr            %pstate, %g7
        sllx            %o1, 3, %o1
        andn            %g7, PSTATE_IE, %g2
        wrpr            %g2, 0x0, %pstate
        wrpr            %g0, 1, %tl
        mov             PRIMARY_CONTEXT, %o4
        ldxa            [%o4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx            %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or              %o0, %o3, %o0   /* Preserve nucleus page size fields */
        stxa            %o0, [%o4] ASI_DMMU
1:      sub             %o1, (1 << 3), %o1
        ldx             [%o2 + %o1], %o3
        andcc           %o3, 1, %g0
        be,pn           %icc, 2f
         andn           %o3, 1, %o3
        stxa            %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa            %g0, [%o3] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %o1, 1b
         nop
        stxa            %g2, [%o4] ASI_DMMU
        flush           %g6
        wrpr            %g0, 0, %tl
        retl
         wrpr           %g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
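        /* Cheetah can invalidate D-cache lines directly by physical
         * address via ASI_DCACHE_INVALIDATE, so no Spitfire-style
         * tag-compare loop is needed here.
         */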
__cheetah_flush_dcache_page: /* 11 insns */
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %g1, 32, %g1
        sub             %o0, %g1, %o0
        sethi           %hi(PAGE_SIZE), %o4
1:      subcc           %o4, (1 << 5), %o4
        stxa            %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
        membar          #Sync
        bne,pt          %icc, 1b
         nop
        retl            /* I-cache flush never needed on Cheetah, see callers. */
         nop
#endif /* DCACHE_ALIASING_POSSIBLE */

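        /* Copy %o2 instruction words from the routine at %o1 over the
         * routine at %o0, flushing the I-cache after each word stored.
         */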
cheetah_patch_one:
1:      lduw            [%o1], %g1
        stw             %g1, [%o0]
        flush           %o0
        subcc           %o2, 1, %o2
        add             %o1, 4, %o1
        bne,pt          %icc, 1b
         add            %o0, 4, %o0
        retl
         nop

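        /* Run once at boot on Cheetah: patch the Spitfire flush
         * routines above in place with their Cheetah versions.  The
         * instruction counts passed here must match the "nn insns"
         * annotations on the Cheetah routines.
         */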
        .globl          cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
        save            %sp, -128, %sp

        sethi           %hi(__flush_tlb_mm), %o0
        or              %o0, %lo(__flush_tlb_mm), %o0
        sethi           %hi(__cheetah_flush_tlb_mm), %o1
        or              %o1, %lo(__cheetah_flush_tlb_mm), %o1
        call            cheetah_patch_one
         mov            18, %o2

        sethi           %hi(__flush_tlb_pending), %o0
        or              %o0, %lo(__flush_tlb_pending), %o0
        sethi           %hi(__cheetah_flush_tlb_pending), %o1
        or              %o1, %lo(__cheetah_flush_tlb_pending), %o1
        call            cheetah_patch_one
         mov            26, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi           %hi(__flush_dcache_page), %o0
        or              %o0, %lo(__flush_dcache_page), %o0
        sethi           %hi(__cheetah_flush_dcache_page), %o1
        or              %o1, %lo(__cheetah_flush_dcache_page), %o1
        call            cheetah_patch_one
         mov            11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

        ret
         restore

#ifdef CONFIG_SMP
        /* These are all called by the slaves of a cross call, at
         * trap level 1, with interrupts fully disabled.
         *
         * Register usage:
         *   %g5        mm->context     (all tlb flushes)
         *   %g1        address arg 1   (tlb page and range flushes)
         *   %g7        address arg 2   (tlb range flush only)
         *
         *   %g6        ivector table, don't touch
         *   %g2        scratch 1
         *   %g3        scratch 2
         *   %g4        scratch 3
         *
         * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
         */
        .align          32
        .globl          xcall_flush_tlb_mm
xcall_flush_tlb_mm:
        mov             PRIMARY_CONTEXT, %g2
        ldxa            [%g2] ASI_DMMU, %g3
        srlx            %g3, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or              %g5, %g4, %g5   /* Preserve nucleus page size fields */
        stxa            %g5, [%g2] ASI_DMMU
        mov             0x40, %g4
        stxa            %g0, [%g4] ASI_DMMU_DEMAP
        stxa            %g0, [%g4] ASI_IMMU_DEMAP
        stxa            %g3, [%g2] ASI_DMMU
        retry

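        /* Cross-call variant of the pending flush: the same vaddr
         * array walk as __flush_tlb_pending, but run at trap level 1
         * on the target cpu and done through the primary context,
         * preserving the nucleus page-size fields.
         */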
        .globl          xcall_flush_tlb_pending
xcall_flush_tlb_pending:
        /* %g5=context, %g1=nr, %g7=vaddrs[] */
        sllx            %g1, 3, %g1
        mov             PRIMARY_CONTEXT, %g4
        ldxa            [%g4] ASI_DMMU, %g2
        srlx            %g2, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx            %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or              %g5, %g4, %g5
        mov             PRIMARY_CONTEXT, %g4
        stxa            %g5, [%g4] ASI_DMMU
1:      sub             %g1, (1 << 3), %g1
        ldx             [%g7 + %g1], %g5
        andcc           %g5, 0x1, %g0
        be,pn           %icc, 2f
         andn           %g5, 0x1, %g5
        stxa            %g0, [%g5] ASI_IMMU_DEMAP
2:      stxa            %g0, [%g5] ASI_DMMU_DEMAP
        membar          #Sync
        brnz,pt         %g1, 1b
         nop
        stxa            %g2, [%g4] ASI_DMMU
        retry

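        /* Page-align %g1 (start) and %g7 (end), then demap every page
         * of the range from both MMUs in the nucleus context, walking
         * from the last page back down to %g1.
         */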
        .globl          xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:
        sethi           %hi(PAGE_SIZE - 1), %g2
        or              %g2, %lo(PAGE_SIZE - 1), %g2
        andn            %g1, %g2, %g1
        andn            %g7, %g2, %g7
        sub             %g7, %g1, %g3
        add             %g2, 1, %g2
        sub             %g3, %g2, %g3
        or              %g1, 0x20, %g1          ! Nucleus
1:      stxa            %g0, [%g1 + %g3] ASI_DMMU_DEMAP
        stxa            %g0, [%g1 + %g3] ASI_IMMU_DEMAP
        membar          #Sync
        brnz,pt         %g3, 1b
         sub            %g3, %g2, %g3
        retry
        nop
        nop

        /* This runs in a very controlled environment, so we do
         * not need to worry about BH races etc.
         */
        .globl          xcall_sync_tick
xcall_sync_tick:
        rdpr            %pstate, %g2
        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
109:     or             %g7, %lo(109b), %g7
        call            smp_synchronize_tick_client
         nop
        clr             %l6
        b               rtrap_xcall
         ldx            [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

        /* NOTE: This is SPECIAL!!  We do etrap/rtrap however
         *       we choose to deal with the "BH's run with
         *       %pil==15" problem (described in asm/pil.h)
         *       by just invoking rtrap directly past where
         *       BH's are checked for.
         *
         *       We do it like this because we do not want %pil==15
         *       lockups to prevent regs being reported.
         */
        .globl          xcall_report_regs
xcall_report_regs:
        rdpr            %pstate, %g2
        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
109:     or             %g7, %lo(109b), %g7
        call            __show_regs
         add            %sp, PTREGS_OFF, %o0
        clr             %l6
        /* Has to be a non-v9 branch due to the large distance. */
        b               rtrap_xcall
         ldx            [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

#ifdef DCACHE_ALIASING_POSSIBLE
        .align          32
        .globl          xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
        sethi           %hi(PAGE_SIZE), %g3
1:      subcc           %g3, (1 << 5), %g3
        stxa            %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
        membar          #Sync
        bne,pt          %icc, 1b
         nop
        retry
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */

        .globl          xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
                                     %g7 == kernel page virtual address
                                     %g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
        srlx            %g1, (13 - 2), %g1      ! Form tag comparator
        sethi           %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
        sub             %g3, (1 << 5), %g3      ! D$ linesize == 32
1:      ldxa            [%g3] ASI_DCACHE_TAG, %g2
        andcc           %g2, 0x3, %g0
        be,pn           %xcc, 2f
         andn           %g2, 0x3, %g2
        cmp             %g2, %g1
        bne,pt          %xcc, 2f
         nop
        stxa            %g0, [%g3] ASI_DCACHE_TAG
        membar          #Sync
2:      cmp             %g3, 0
        bne,pt          %xcc, 1b
         sub            %g3, (1 << 5), %g3

        brz,pn          %g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
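        /* Flush the page through its kernel virtual address so the
         * I-cache picks up the new contents, 32 bytes per flush.
         */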
        sethi           %hi(PAGE_SIZE), %g3

1:      flush           %g7
        subcc           %g3, (1 << 5), %g3
        bne,pt          %icc, 1b
         add            %g7, (1 << 5), %g7

2:      retry
        nop
        nop

        .globl          xcall_promstop
xcall_promstop:
        rdpr            %pstate, %g2
        wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
        rdpr            %pil, %g2
        wrpr            %g0, 15, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
109:     or             %g7, %lo(109b), %g7
        flushw
        call            prom_stopself
         nop
        /* We should not return, just spin if we do... */
1:      b,a,pt          %xcc, 1b
         nop

        .data

errata32_hwbug:
        .xword          0

        .text

        /* These two are not performance critical... */
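        /* Walk every D-TLB and I-TLB entry by index, clearing the tag
         * and data of each entry that is not locked (_PAGE_L clear in
         * the data word).  The dummy stores to errata32_hwbug implement
         * the Spitfire Errata #32 workaround noted in the comments.
         */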
        .globl          xcall_flush_tlb_all_spitfire
xcall_flush_tlb_all_spitfire:
        /* Spitfire Errata #32 workaround. */
        sethi           %hi(errata32_hwbug), %g4
        stx             %g0, [%g4 + %lo(errata32_hwbug)]

        clr             %g2
        clr             %g3
1:      ldxa            [%g3] ASI_DTLB_DATA_ACCESS, %g4
        and             %g4, _PAGE_L, %g5
        brnz,pn         %g5, 2f
         mov            TLB_TAG_ACCESS, %g7

        stxa            %g0, [%g7] ASI_DMMU
        membar          #Sync
        stxa            %g0, [%g3] ASI_DTLB_DATA_ACCESS
        membar          #Sync

        /* Spitfire Errata #32 workaround. */
        sethi           %hi(errata32_hwbug), %g4
        stx             %g0, [%g4 + %lo(errata32_hwbug)]

2:      ldxa            [%g3] ASI_ITLB_DATA_ACCESS, %g4
        and             %g4, _PAGE_L, %g5
        brnz,pn         %g5, 2f
         mov            TLB_TAG_ACCESS, %g7

        stxa            %g0, [%g7] ASI_IMMU
        membar          #Sync
        stxa            %g0, [%g3] ASI_ITLB_DATA_ACCESS
        membar          #Sync

        /* Spitfire Errata #32 workaround. */
        sethi           %hi(errata32_hwbug), %g4
        stx             %g0, [%g4 + %lo(errata32_hwbug)]

2:      add             %g2, 1, %g2
        cmp             %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
        ble,pt          %icc, 1b
         sll            %g2, 3, %g3
        flush           %g6
        retry

        .globl          xcall_flush_tlb_all_cheetah
xcall_flush_tlb_all_cheetah:
        mov             0x80, %g2
        stxa            %g0, [%g2] ASI_DMMU_DEMAP
        stxa            %g0, [%g2] ASI_IMMU_DEMAP
        retry

        /* These just get rescheduled to PIL vectors. */
        .globl          xcall_call_function
xcall_call_function:
        wr              %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
        retry

        .globl          xcall_receive_signal
xcall_receive_signal:
        wr              %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
        retry

        .globl          xcall_capture
xcall_capture:
        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry

#endif /* CONFIG_SMP */