1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
2 * arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
9 */
10
11#include <linux/errno.h>
12
13#include <asm/head.h>
14#include <asm/asi.h>
15#include <asm/smp.h>
16#include <asm/kgdb.h>
17#include <asm/contregs.h>
18#include <asm/ptrace.h>
19#include <asm/asm-offsets.h>
20#include <asm/psr.h>
21#include <asm/vaddrs.h>
22#include <asm/memreg.h>
23#include <asm/page.h>
24#ifdef CONFIG_SUN4
25#include <asm/pgtsun4.h>
26#else
27#include <asm/pgtsun4c.h>
28#endif
29#include <asm/winmacro.h>
30#include <asm/signal.h>
31#include <asm/obio.h>
32#include <asm/mxcc.h>
33#include <asm/thread_info.h>
34#include <asm/param.h>
35
36#include <asm/asmmacro.h>
37
38#define curptr g6
39
40#define NR_SYSCALLS 300 /* Each OS is different... */
41
42/* These are just handy. */
43#define _SV save %sp, -STACKFRAME_SZ, %sp
44#define _RS restore
45
46#define FLUSH_ALL_KERNEL_WINDOWS \
47 _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
48 _RS; _RS; _RS; _RS; _RS; _RS; _RS;
49
50/* First, KGDB low level things. This is a rewrite
51 * of the routines found in the sparc-stub.c asm() statement
52 * from the gdb distribution. This is also dual-purpose
53 * as a software trap for userlevel programs.
54 */
55 .data
56 .align 4
57
58in_trap_handler:
59 .word 0
60
61 .text
62 .align 4
63
64#if 0 /* kgdb is dropped from 2.5.33 */
65! This function is called when any SPARC trap (except window overflow or
66! underflow) occurs. It makes sure that the invalid register window is still
67! available before jumping into C code. It will also restore the world if you
68! return from handle_exception.
69
70 .globl trap_low
71trap_low:
72 rd %wim, %l3
73 SAVE_ALL
74
75 sethi %hi(in_trap_handler), %l4
76 ld [%lo(in_trap_handler) + %l4], %l5
77 inc %l5
78 st %l5, [%lo(in_trap_handler) + %l4]
79
80 /* Make sure kgdb sees the same state we just saved. */
81 LOAD_PT_GLOBALS(sp)
82 LOAD_PT_INS(sp)
83 ld [%sp + STACKFRAME_SZ + PT_Y], %l4
84 ld [%sp + STACKFRAME_SZ + PT_WIM], %l3
85 ld [%sp + STACKFRAME_SZ + PT_PSR], %l0
86 ld [%sp + STACKFRAME_SZ + PT_PC], %l1
87 ld [%sp + STACKFRAME_SZ + PT_NPC], %l2
88 rd %tbr, %l5 /* Never changes... */
89
90 /* Make kgdb exception frame. */
91 sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
92 ! + hidden arg + arg spill
93 ! + doubleword alignment
94 ! + registers[72] local var
95 SAVE_KGDB_GLOBALS(sp)
96 SAVE_KGDB_INS(sp)
97 SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
98
99 /* We are increasing PIL, so two writes. */
100 or %l0, PSR_PIL, %l0
101 wr %l0, 0, %psr
102 WRITE_PAUSE
103 wr %l0, PSR_ET, %psr
104 WRITE_PAUSE
105
106 call handle_exception
107 add %sp, STACKFRAME_SZ, %o0 ! Pass address of registers
108
109 /* Load new kgdb register set. */
110 LOAD_KGDB_GLOBALS(sp)
111 LOAD_KGDB_INS(sp)
112 LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
113 wr %l4, 0x0, %y
114
115 sethi %hi(in_trap_handler), %l4
116 ld [%lo(in_trap_handler) + %l4], %l5
117 dec %l5
118 st %l5, [%lo(in_trap_handler) + %l4]
119
120 add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.
121
122 /* Now take what kgdb did and place it into the pt_regs
123 * frame which SparcLinux RESTORE_ALL understands.
124 */
125 STORE_PT_INS(sp)
126 STORE_PT_GLOBALS(sp)
127 STORE_PT_YREG(sp, g2)
128 STORE_PT_PRIV(sp, l0, l1, l2)
129
130 RESTORE_ALL
131#endif
132
133#ifdef CONFIG_BLK_DEV_FD
134 .text
135 .align 4
136 .globl floppy_hardint
137floppy_hardint:
138 /*
139 * This code cannot touch registers %l0 %l1 and %l2
140 * because SAVE_ALL depends on their values. It depends
141 * on %l3 also, but we regenerate it before a call.
142 * Other registers are:
143 * %l3 -- base address of fdc registers
144 * %l4 -- pdma_vaddr
145 * %l5 -- scratch for ld/st address
146 * %l6 -- pdma_size
147 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
148 */
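	/* For reference, a hedged C sketch of the pseudo-DMA transfer loop
	 * implemented below (fdc_status/pdma_vaddr/pdma_size are the real
	 * variables referenced here; readb/writeb stand in for the raw
	 * byte accesses):
	 *
	 *	while (pdma_size) {
	 *		unsigned char msr = readb(fdc_status);
	 *		if (!(msr & 0x80))	-- FIFO drained, wait for next IRQ
	 *			goto fifo_emptied;
	 *		if (!(msr & 0x20))	-- dropped out of non-DMA mode
	 *			goto overrun;
	 *		pdma_size--;
	 *		if (msr & 0x40)		-- 1 = read from controller
	 *			*pdma_vaddr++ = readb(fdc_status + 1);
	 *		else			-- 0 = write to controller
	 *			writeb(*pdma_vaddr++, fdc_status + 1);
	 *	}
	 */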
149
150 /* Do we have work to do? */
151 sethi %hi(doing_pdma), %l7
152 ld [%l7 + %lo(doing_pdma)], %l7
153 cmp %l7, 0
154 be floppy_dosoftint
155 nop
156
157 /* Load fdc register base */
158 sethi %hi(fdc_status), %l3
159 ld [%l3 + %lo(fdc_status)], %l3
160
161 /* Setup register addresses */
162 sethi %hi(pdma_vaddr), %l5 ! transfer buffer
163 ld [%l5 + %lo(pdma_vaddr)], %l4
164 sethi %hi(pdma_size), %l5 ! bytes to go
165 ld [%l5 + %lo(pdma_size)], %l6
166next_byte:
167 ldub [%l3], %l7
168
169 andcc %l7, 0x80, %g0 ! Does fifo still have data
170 bz floppy_fifo_emptied ! fifo has been emptied...
171 andcc %l7, 0x20, %g0 ! in non-dma mode still?
172 bz floppy_overrun ! nope, overrun
173 andcc %l7, 0x40, %g0 ! 0=write 1=read
174 bz floppy_write
175 sub %l6, 0x1, %l6
176
177 /* Ok, actually read this byte */
178 ldub [%l3 + 1], %l7
179 orcc %g0, %l6, %g0
180 stb %l7, [%l4]
181 bne next_byte
182 add %l4, 0x1, %l4
183
184 b floppy_tdone
185 nop
186
187floppy_write:
188 /* Ok, actually write this byte */
189 ldub [%l4], %l7
190 orcc %g0, %l6, %g0
191 stb %l7, [%l3 + 1]
192 bne next_byte
193 add %l4, 0x1, %l4
194
195 /* fall through... */
196floppy_tdone:
197 sethi %hi(pdma_vaddr), %l5
198 st %l4, [%l5 + %lo(pdma_vaddr)]
199 sethi %hi(pdma_size), %l5
200 st %l6, [%l5 + %lo(pdma_size)]
201 /* Flip terminal count pin */
202 set auxio_register, %l7
203 ld [%l7], %l7
204
205 set sparc_cpu_model, %l5
206 ld [%l5], %l5
207 subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
208 be 1f
209 ldub [%l7], %l5
210
211 or %l5, 0xc2, %l5
212 stb %l5, [%l7]
213 andn %l5, 0x02, %l5
214 b 2f
215 nop
216
2171:
218 or %l5, 0xf4, %l5
219 stb %l5, [%l7]
220 andn %l5, 0x04, %l5
221
2222:
223 /* Kill some time so the bits set */
224 WRITE_PAUSE
225 WRITE_PAUSE
226
227 stb %l5, [%l7]
228
229 /* Prevent recursion */
230 sethi %hi(doing_pdma), %l7
231 b floppy_dosoftint
232 st %g0, [%l7 + %lo(doing_pdma)]
233
234 /* We emptied the FIFO, but we haven't read everything
235 * as of yet. Store the current transfer address and
236 * bytes left to read so we can continue when the next
237 * fast IRQ comes in.
238 */
239floppy_fifo_emptied:
240 sethi %hi(pdma_vaddr), %l5
241 st %l4, [%l5 + %lo(pdma_vaddr)]
242 sethi %hi(pdma_size), %l7
243 st %l6, [%l7 + %lo(pdma_size)]
244
245 /* Restore condition codes */
246 wr %l0, 0x0, %psr
247 WRITE_PAUSE
248
249 jmp %l1
250 rett %l2
251
252floppy_overrun:
253 sethi %hi(pdma_vaddr), %l5
254 st %l4, [%l5 + %lo(pdma_vaddr)]
255 sethi %hi(pdma_size), %l5
256 st %l6, [%l5 + %lo(pdma_size)]
257 /* Prevent recursion */
258 sethi %hi(doing_pdma), %l7
259 st %g0, [%l7 + %lo(doing_pdma)]
260
261 /* fall through... */
262floppy_dosoftint:
263 rd %wim, %l3
264 SAVE_ALL
265
266 /* Set all IRQs off. */
267 or %l0, PSR_PIL, %l4
268 wr %l4, 0x0, %psr
269 WRITE_PAUSE
270 wr %l4, PSR_ET, %psr
271 WRITE_PAUSE
272
273 mov 11, %o0 ! floppy irq level (unused anyway)
274 mov %g0, %o1 ! devid is not used in fast interrupts
275 call sparc_floppy_irq
276 add %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
277
278 RESTORE_ALL
279
280#endif /* (CONFIG_BLK_DEV_FD) */
281
282 /* Bad trap handler */
283 .globl bad_trap_handler
284bad_trap_handler:
285 SAVE_ALL
286
287 wr %l0, PSR_ET, %psr
288 WRITE_PAUSE
289
290 add %sp, STACKFRAME_SZ, %o0 ! pt_regs
291 call do_hw_interrupt
292 mov %l7, %o1 ! trap number
293
294 RESTORE_ALL
295
296/* For now all IRQs not registered get sent here. handler_irq() will
297 * see if a routine is registered to handle this interrupt and if not
298 * it will say so on the console.
299 */
300
301 .align 4
302 .globl real_irq_entry, patch_handler_irq
303real_irq_entry:
304 SAVE_ALL
305
306#ifdef CONFIG_SMP
307 .globl patchme_maybe_smp_msg
308
309 cmp %l7, 12
310patchme_maybe_smp_msg:
311 bgu maybe_smp4m_msg
312 nop
313#endif
314
315real_irq_continue:
316 or %l0, PSR_PIL, %g2
317 wr %g2, 0x0, %psr
318 WRITE_PAUSE
319 wr %g2, PSR_ET, %psr
320 WRITE_PAUSE
321 mov %l7, %o0 ! irq level
322patch_handler_irq:
323 call handler_irq
324 add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
325 or %l0, PSR_PIL, %g2 ! restore PIL after handler_irq
326 wr %g2, PSR_ET, %psr ! keep ET up
327 WRITE_PAUSE
328
329 RESTORE_ALL
330
331#ifdef CONFIG_SMP
332 /* SMP per-cpu ticker interrupts are handled specially. */
333smp4m_ticker:
334 bne real_irq_continue+4
335 or %l0, PSR_PIL, %g2
336 wr %g2, 0x0, %psr
337 WRITE_PAUSE
338 wr %g2, PSR_ET, %psr
339 WRITE_PAUSE
340 call smp4m_percpu_timer_interrupt
341 add %sp, STACKFRAME_SZ, %o0
342 wr %l0, PSR_ET, %psr
343 WRITE_PAUSE
344 RESTORE_ALL
345
346 /* Here is where we check for possible SMP IPI passed to us
347 * on some level other than 15 which is the NMI and only used
348 * for cross calls. That has a separate entry point below.
349 */
350maybe_smp4m_msg:
351 GET_PROCESSOR4M_ID(o3)
352 set sun4m_interrupts, %l5
353 ld [%l5], %o5
354 sethi %hi(0x40000000), %o2
355 sll %o3, 12, %o3
356 ld [%o5 + %o3], %o1
357 andcc %o1, %o2, %g0
358 be,a smp4m_ticker
359 cmp %l7, 14
360 st %o2, [%o5 + 0x4]
361 WRITE_PAUSE
362 ld [%o5], %g0
363 WRITE_PAUSE
364 or %l0, PSR_PIL, %l4
365 wr %l4, 0x0, %psr
366 WRITE_PAUSE
367 wr %l4, PSR_ET, %psr
368 WRITE_PAUSE
369 call smp_reschedule_irq
370 nop
371
372 RESTORE_ALL
373
374 .align 4
375 .globl linux_trap_ipi15_sun4m
376linux_trap_ipi15_sun4m:
377 SAVE_ALL
378 sethi %hi(0x80000000), %o2
379 GET_PROCESSOR4M_ID(o0)
380 set sun4m_interrupts, %l5
381 ld [%l5], %o5
382 sll %o0, 12, %o0
383 add %o5, %o0, %o5
384 ld [%o5], %o3
385 andcc %o3, %o2, %g0
386 be 1f ! Must be an NMI async memory error
387 st %o2, [%o5 + 4]
388 WRITE_PAUSE
389 ld [%o5], %g0
390 WRITE_PAUSE
391 or %l0, PSR_PIL, %l4
392 wr %l4, 0x0, %psr
393 WRITE_PAUSE
394 wr %l4, PSR_ET, %psr
395 WRITE_PAUSE
396 call smp4m_cross_call_irq
397 nop
398 b ret_trap_lockless_ipi
399 clr %l6
4001:
401 /* NMI async memory error handling. */
402 sethi %hi(0x80000000), %l4
403 sethi %hi(0x4000), %o3
404 sub %o5, %o0, %o5
405 add %o5, %o3, %l5
406 st %l4, [%l5 + 0xc]
407 WRITE_PAUSE
408 ld [%l5], %g0
409 WRITE_PAUSE
410 or %l0, PSR_PIL, %l4
411 wr %l4, 0x0, %psr
412 WRITE_PAUSE
413 wr %l4, PSR_ET, %psr
414 WRITE_PAUSE
415 call sun4m_nmi
416 nop
417 st %l4, [%l5 + 0x8]
418 WRITE_PAUSE
419 ld [%l5], %g0
420 WRITE_PAUSE
421 RESTORE_ALL
422
423 .globl smp4d_ticker
424 /* SMP per-cpu ticker interrupts are handled specially. */
425smp4d_ticker:
426 SAVE_ALL
427 or %l0, PSR_PIL, %g2
428 sethi %hi(CC_ICLR), %o0
429 sethi %hi(1 << 14), %o1
430 or %o0, %lo(CC_ICLR), %o0
431 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 14 in MXCC's ICLR */
432 wr %g2, 0x0, %psr
433 WRITE_PAUSE
434 wr %g2, PSR_ET, %psr
435 WRITE_PAUSE
436 call smp4d_percpu_timer_interrupt
437 add %sp, STACKFRAME_SZ, %o0
438 wr %l0, PSR_ET, %psr
439 WRITE_PAUSE
440 RESTORE_ALL
441
442 .align 4
443 .globl linux_trap_ipi15_sun4d
444linux_trap_ipi15_sun4d:
445 SAVE_ALL
446 sethi %hi(CC_BASE), %o4
447 sethi %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
448 or %o4, (CC_EREG - CC_BASE), %o0
449 ldda [%o0] ASI_M_MXCC, %o0
450 andcc %o0, %o2, %g0
451 bne 1f
452 sethi %hi(BB_STAT2), %o2
453 lduba [%o2] ASI_M_CTL, %o2
454 andcc %o2, BB_STAT2_MASK, %g0
455 bne 2f
456 or %o4, (CC_ICLR - CC_BASE), %o0
457 sethi %hi(1 << 15), %o1
458 stha %o1, [%o0] ASI_M_MXCC /* Clear PIL 15 in MXCC's ICLR */
459 or %l0, PSR_PIL, %l4
460 wr %l4, 0x0, %psr
461 WRITE_PAUSE
462 wr %l4, PSR_ET, %psr
463 WRITE_PAUSE
464 call smp4d_cross_call_irq
465 nop
466 b ret_trap_lockless_ipi
467 clr %l6
468
4691: /* MXCC error */
4702: /* BB error */
471 /* Disable PIL 15 */
472 set CC_IMSK, %l4
473 lduha [%l4] ASI_M_MXCC, %l5
474 sethi %hi(1 << 15), %l7
475 or %l5, %l7, %l5
476 stha %l5, [%l4] ASI_M_MXCC
477 /* FIXME */
4781: b,a 1b
479
480#endif /* CONFIG_SMP */
481
482 /* This routine handles illegal instructions and privileged
483 * instruction attempts from user code.
484 */
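	/* What the entry below does, as a hedged C sketch: fetch the faulting
	 * instruction and test it against the SPARC FLUSH opcode pattern; an
	 * unimplemented FLUSH is silently skipped, anything else goes to
	 * do_illegal_instruction():
	 *
	 *	unsigned int insn = *(unsigned int *)pc;
	 *	if ((insn & 0xc1f80000) == 0x81d80000) {
	 *		regs->pc  = npc;	-- just step over the flush
	 *		regs->npc = npc + 4;
	 *	} else {
	 *		do_illegal_instruction(regs, pc, npc, psr);
	 *	}
	 */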
485 .align 4
486 .globl bad_instruction
487bad_instruction:
488 sethi %hi(0xc1f80000), %l4
489 ld [%l1], %l5
490 sethi %hi(0x81d80000), %l7
491 and %l5, %l4, %l5
492 cmp %l5, %l7
493 be 1f
494 SAVE_ALL
495
496 wr %l0, PSR_ET, %psr ! re-enable traps
497 WRITE_PAUSE
498
499 add %sp, STACKFRAME_SZ, %o0
500 mov %l1, %o1
501 mov %l2, %o2
502 call do_illegal_instruction
503 mov %l0, %o3
504
505 RESTORE_ALL
506
5071: /* unimplemented flush - just skip */
508 jmpl %l2, %g0
509 rett %l2 + 4
510
511 .align 4
512 .globl priv_instruction
513priv_instruction:
514 SAVE_ALL
515
516 wr %l0, PSR_ET, %psr
517 WRITE_PAUSE
518
519 add %sp, STACKFRAME_SZ, %o0
520 mov %l1, %o1
521 mov %l2, %o2
522 call do_priv_instruction
523 mov %l0, %o3
524
525 RESTORE_ALL
526
527 /* This routine handles unaligned data accesses. */
528 .align 4
529 .globl mna_handler
530mna_handler:
531 andcc %l0, PSR_PS, %g0
532 be mna_fromuser
533 nop
534
535 SAVE_ALL
536
537 wr %l0, PSR_ET, %psr
538 WRITE_PAUSE
539
540 ld [%l1], %o1
541 call kernel_unaligned_trap
542 add %sp, STACKFRAME_SZ, %o0
543
544 RESTORE_ALL
545
546mna_fromuser:
547 SAVE_ALL
548
549 wr %l0, PSR_ET, %psr ! re-enable traps
550 WRITE_PAUSE
551
552 ld [%l1], %o1
553 call user_unaligned_trap
554 add %sp, STACKFRAME_SZ, %o0
555
556 RESTORE_ALL
557
558 /* This routine handles floating point disabled traps. */
559 .align 4
560 .globl fpd_trap_handler
561fpd_trap_handler:
562 SAVE_ALL
563
564 wr %l0, PSR_ET, %psr ! re-enable traps
565 WRITE_PAUSE
566
567 add %sp, STACKFRAME_SZ, %o0
568 mov %l1, %o1
569 mov %l2, %o2
570 call do_fpd_trap
571 mov %l0, %o3
572
573 RESTORE_ALL
574
575 /* This routine handles Floating Point Exceptions. */
576 .align 4
577 .globl fpe_trap_handler
578fpe_trap_handler:
579 set fpsave_magic, %l5
580 cmp %l1, %l5
581 be 1f
582 sethi %hi(fpsave), %l5
583 or %l5, %lo(fpsave), %l5
584 cmp %l1, %l5
585 bne 2f
586 sethi %hi(fpsave_catch2), %l5
587 or %l5, %lo(fpsave_catch2), %l5
588 wr %l0, 0x0, %psr
589 WRITE_PAUSE
590 jmp %l5
591 rett %l5 + 4
5921:
593 sethi %hi(fpsave_catch), %l5
594 or %l5, %lo(fpsave_catch), %l5
595 wr %l0, 0x0, %psr
596 WRITE_PAUSE
597 jmp %l5
598 rett %l5 + 4
599
6002:
601 SAVE_ALL
602
603 wr %l0, PSR_ET, %psr ! re-enable traps
604 WRITE_PAUSE
605
606 add %sp, STACKFRAME_SZ, %o0
607 mov %l1, %o1
608 mov %l2, %o2
609 call do_fpe_trap
610 mov %l0, %o3
611
612 RESTORE_ALL
613
614 /* This routine handles Tag Overflow Exceptions. */
615 .align 4
616 .globl do_tag_overflow
617do_tag_overflow:
618 SAVE_ALL
619
620 wr %l0, PSR_ET, %psr ! re-enable traps
621 WRITE_PAUSE
622
623 add %sp, STACKFRAME_SZ, %o0
624 mov %l1, %o1
625 mov %l2, %o2
626 call handle_tag_overflow
627 mov %l0, %o3
628
629 RESTORE_ALL
630
631 /* This routine handles Watchpoint Exceptions. */
632 .align 4
633 .globl do_watchpoint
634do_watchpoint:
635 SAVE_ALL
636
637 wr %l0, PSR_ET, %psr ! re-enable traps
638 WRITE_PAUSE
639
640 add %sp, STACKFRAME_SZ, %o0
641 mov %l1, %o1
642 mov %l2, %o2
643 call handle_watchpoint
644 mov %l0, %o3
645
646 RESTORE_ALL
647
648 /* This routine handles Register Access Exceptions. */
649 .align 4
650 .globl do_reg_access
651do_reg_access:
652 SAVE_ALL
653
654 wr %l0, PSR_ET, %psr ! re-enable traps
655 WRITE_PAUSE
656
657 add %sp, STACKFRAME_SZ, %o0
658 mov %l1, %o1
659 mov %l2, %o2
660 call handle_reg_access
661 mov %l0, %o3
662
663 RESTORE_ALL
664
665 /* This routine handles Co-Processor Disabled Exceptions. */
666 .align 4
667 .globl do_cp_disabled
668do_cp_disabled:
669 SAVE_ALL
670
671 wr %l0, PSR_ET, %psr ! re-enable traps
672 WRITE_PAUSE
673
674 add %sp, STACKFRAME_SZ, %o0
675 mov %l1, %o1
676 mov %l2, %o2
677 call handle_cp_disabled
678 mov %l0, %o3
679
680 RESTORE_ALL
681
682 /* This routine handles Co-Processor Exceptions. */
683 .align 4
684 .globl do_cp_exception
685do_cp_exception:
686 SAVE_ALL
687
688 wr %l0, PSR_ET, %psr ! re-enable traps
689 WRITE_PAUSE
690
691 add %sp, STACKFRAME_SZ, %o0
692 mov %l1, %o1
693 mov %l2, %o2
694 call handle_cp_exception
695 mov %l0, %o3
696
697 RESTORE_ALL
698
699 /* This routine handles Hardware Divide By Zero Exceptions. */
700 .align 4
701 .globl do_hw_divzero
702do_hw_divzero:
703 SAVE_ALL
704
705 wr %l0, PSR_ET, %psr ! re-enable traps
706 WRITE_PAUSE
707
708 add %sp, STACKFRAME_SZ, %o0
709 mov %l1, %o1
710 mov %l2, %o2
711 call handle_hw_divzero
712 mov %l0, %o3
713
714 RESTORE_ALL
715
716 .align 4
717 .globl do_flush_windows
718do_flush_windows:
719 SAVE_ALL
720
721 wr %l0, PSR_ET, %psr
722 WRITE_PAUSE
723
724 andcc %l0, PSR_PS, %g0
725 bne dfw_kernel
726 nop
727
728 call flush_user_windows
729 nop
730
731 /* Advance over the trap instruction. */
732 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
733 add %l1, 0x4, %l2
734 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
735 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
736
737 RESTORE_ALL
738
739 .globl flush_patch_one
740
741 /* We get these for debugging routines using __builtin_return_address() */
742dfw_kernel:
743flush_patch_one:
744 FLUSH_ALL_KERNEL_WINDOWS
745
746 /* Advance over the trap instruction. */
747 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1
748 add %l1, 0x4, %l2
749 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
750 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
751
752 RESTORE_ALL
753
754 /* The getcc software trap. The user wants the condition codes from
755 * the %psr in register %g1.
756 */
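	/* Equivalent bit manipulation in C (hedged sketch; PSR_ICC occupies
	 * bits 23:20 of the 32-bit PSR, so N, Z, V, C end up in %g1 bits 3:0):
	 *
	 *	g1 = (psr >> 20) & 0xf;
	 */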
757
758 .align 4
759 .globl getcc_trap_handler
760getcc_trap_handler:
761 srl %l0, 20, %g1 ! give user
762 and %g1, 0xf, %g1 ! only ICC bits in %psr
763 jmp %l2 ! advance over trap instruction
764 rett %l2 + 0x4 ! like this...
765
766 /* The setcc software trap. The user has condition codes in %g1
767 * that it would like placed in the %psr. Be careful not to flip
768 * any unintentional bits!
769 */
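	/* The inverse of getcc above, as a hedged C sketch (PSR_ICC masks
	 * bits 23:20; everything outside the ICC field is preserved):
	 *
	 *	psr = (psr & ~PSR_ICC) | ((g1 << 20) & PSR_ICC);
	 */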
770
771 .align 4
772 .globl setcc_trap_handler
773setcc_trap_handler:
774 sll %g1, 0x14, %l4
775 set PSR_ICC, %l5
776 andn %l0, %l5, %l0 ! clear ICC bits in %psr
777 and %l4, %l5, %l4 ! clear non-ICC bits in user value
778 or %l4, %l0, %l4 ! or them in... mix mix mix
779
780 wr %l4, 0x0, %psr ! set new %psr
781 WRITE_PAUSE ! TI scumbags...
782
783 jmp %l2 ! advance over trap instruction
784 rett %l2 + 0x4 ! like this...
785
786 .align 4
787 .globl linux_trap_nmi_sun4c
788linux_trap_nmi_sun4c:
789 SAVE_ALL
790
791 /* Ugh, we need to clear the IRQ line. This is now
792 * a very sun4c specific trap handler...
793 */
794 sethi %hi(interrupt_enable), %l5
795 ld [%l5 + %lo(interrupt_enable)], %l5
796 ldub [%l5], %l6
797 andn %l6, INTS_ENAB, %l6
798 stb %l6, [%l5]
799
800 /* Now it is safe to re-enable traps without recursion. */
801 or %l0, PSR_PIL, %l0
802 wr %l0, PSR_ET, %psr
803 WRITE_PAUSE
804
805 /* Now call the c-code with the pt_regs frame ptr and the
806 * memory error registers as arguments. The ordering chosen
807 * here is due to unlatching semantics.
808 */
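	/* The register shuffling below is, roughly, this call (hedged; the
	 * exact C prototype lives in the sparc fault-handling code):
	 *
	 *	sparc_lvl15_nmi(regs, sync_err, sync_vaddr, async_err, async_vaddr);
	 *
	 * with the control-space registers read in the order needed to
	 * unlatch them correctly, as noted above.
	 */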
809 sethi %hi(AC_SYNC_ERR), %o0
810 add %o0, 0x4, %o0
811 lda [%o0] ASI_CONTROL, %o2 ! sync vaddr
812 sub %o0, 0x4, %o0
813 lda [%o0] ASI_CONTROL, %o1 ! sync error
814 add %o0, 0xc, %o0
815 lda [%o0] ASI_CONTROL, %o4 ! async vaddr
816 sub %o0, 0x4, %o0
817 lda [%o0] ASI_CONTROL, %o3 ! async error
818 call sparc_lvl15_nmi
819 add %sp, STACKFRAME_SZ, %o0
820
821 RESTORE_ALL
822
823 .align 4
824 .globl invalid_segment_patch1_ff
825 .globl invalid_segment_patch2_ff
826invalid_segment_patch1_ff: cmp %l4, 0xff
827invalid_segment_patch2_ff: mov 0xff, %l3
828
829 .align 4
830 .globl invalid_segment_patch1_1ff
831 .globl invalid_segment_patch2_1ff
832invalid_segment_patch1_1ff: cmp %l4, 0x1ff
833invalid_segment_patch2_1ff: mov 0x1ff, %l3
834
835 .align 4
836 .globl num_context_patch1_16, num_context_patch2_16
837num_context_patch1_16: mov 0x10, %l7
838num_context_patch2_16: mov 0x10, %l7
839
840 .align 4
841 .globl vac_linesize_patch_32
842vac_linesize_patch_32: subcc %l7, 32, %l7
843
844 .align 4
845 .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on
846
847/*
848 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
849 * two instructions (Anton)
850 */
851#ifdef CONFIG_SUN4
852vac_hwflush_patch1_on: nop
853#else
854vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7
855#endif
856
857vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG
858
859 .globl invalid_segment_patch1, invalid_segment_patch2
860 .globl num_context_patch1
861 .globl vac_linesize_patch, vac_hwflush_patch1
862 .globl vac_hwflush_patch2
863
864 .align 4
865 .globl sun4c_fault
866
867! %l0 = %psr
868! %l1 = %pc
869! %l2 = %npc
870! %l3 = %wim
871! %l7 = 1 for textfault
872! We want error in %l5, vaddr in %l6
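! In C terms (hedged sketch), the fault information that eventually reaches
! do_sun4c_fault() is packed into one word as
!
!	info = (vaddr & ~0xfff) | (write ? 2 : 0) | (textfault ? 1 : 0);
!
! and unpacked again in sun4c_fault_fromuser below.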
873sun4c_fault:
874#ifdef CONFIG_SUN4
875 sethi %hi(sun4c_memerr_reg), %l4
876 ld [%l4+%lo(sun4c_memerr_reg)], %l4 ! memerr ctrl reg addr
877 ld [%l4], %l6 ! memerr ctrl reg
878 ld [%l4 + 4], %l5 ! memerr vaddr reg
879 andcc %l6, 0x80, %g0 ! check for error type
880 st %g0, [%l4 + 4] ! clear the error
881 be 0f ! normal error
882 sethi %hi(AC_BUS_ERROR), %l4 ! bus err reg addr
883
884 call prom_halt ! something weird happened
885 ! what exactly did happen?
886 ! what should we do here?
887
8880: or %l4, %lo(AC_BUS_ERROR), %l4 ! bus err reg addr
889 lduba [%l4] ASI_CONTROL, %l6 ! bus err reg
890
891 cmp %l7, 1 ! text fault?
892 be 1f ! yes
893 nop
894
895 ld [%l1], %l4 ! load instruction that caused fault
896 srl %l4, 21, %l4
897 andcc %l4, 1, %g0 ! store instruction?
898
899 be 1f ! no
900 sethi %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
901 ! %lo(SUN4C_SYNC_BADWRITE) = 0
902 or %l4, %l6, %l6 ! set write bit to emulate sun4c
9031:
904#else
905 sethi %hi(AC_SYNC_ERR), %l4
906 add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
907 lda [%l6] ASI_CONTROL, %l5 ! Address
908 lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit
909#endif
910
911 andn %l5, 0xfff, %l5 ! Encode all info into l7
912 srl %l6, 14, %l4
913
914 and %l4, 2, %l4
915 or %l5, %l4, %l4
916
917 or %l4, %l7, %l7 ! l7 = [addr,write,txtfault]
918
919 andcc %l0, PSR_PS, %g0
920 be sun4c_fault_fromuser
921 andcc %l7, 1, %g0 ! Text fault?
922
923 be 1f
924 sethi %hi(KERNBASE), %l4
925
926 mov %l1, %l5 ! PC
927
9281:
929 cmp %l5, %l4
930 blu sun4c_fault_fromuser
931 sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
932
933 /* If the kernel references a bum kernel pointer, or a pte which
934 * points to a non-existent page in RAM, we will run this code
935 * _forever_ and lock up the machine!!!!! So we must check for
936 * this condition; the AC_SYNC_ERR bits are what we must examine.
937 * A parity error would make this happen as well. So we just
938 * check that we are in fact servicing a tlb miss and not some
939 * other type of fault for the kernel.
940 */
941 andcc %l6, 0x80, %g0
942 be sun4c_fault_fromuser
943 and %l5, %l4, %l5
944
945 /* Test for NULL pte_t * in vmalloc area. */
946 sethi %hi(VMALLOC_START), %l4
947 cmp %l5, %l4
948 blu,a invalid_segment_patch1
949 lduXa [%l5] ASI_SEGMAP, %l4
950
951 sethi %hi(swapper_pg_dir), %l4
952 srl %l5, SUN4C_PGDIR_SHIFT, %l6
953 or %l4, %lo(swapper_pg_dir), %l4
954 sll %l6, 2, %l6
955 ld [%l4 + %l6], %l4
956#ifdef CONFIG_SUN4
957 sethi %hi(PAGE_MASK), %l6
958 andcc %l4, %l6, %g0
959#else
960 andcc %l4, PAGE_MASK, %g0
961#endif
962 be sun4c_fault_fromuser
963 lduXa [%l5] ASI_SEGMAP, %l4
964
965invalid_segment_patch1:
966 cmp %l4, 0x7f
967 bne 1f
968 sethi %hi(sun4c_kfree_ring), %l4
969 or %l4, %lo(sun4c_kfree_ring), %l4
970 ld [%l4 + 0x18], %l3
971 deccc %l3 ! do we have a free entry?
972 bcs,a 2f ! no, unmap one.
973 sethi %hi(sun4c_kernel_ring), %l4
974
975 st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries--
976
977 ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next
978 st %l5, [%l6 + 0x08] ! entry->vaddr = address
979
980 ld [%l6 + 0x00], %l3 ! next = entry->next
981 ld [%l6 + 0x04], %l7 ! entry->prev
982
983 st %l7, [%l3 + 0x04] ! next->prev = entry->prev
984 st %l3, [%l7 + 0x00] ! entry->prev->next = next
985
986 sethi %hi(sun4c_kernel_ring), %l4
987 or %l4, %lo(sun4c_kernel_ring), %l4
988 ! head = &sun4c_kernel_ring.ringhd
989
990 ld [%l4 + 0x00], %l7 ! head->next
991
992 st %l4, [%l6 + 0x04] ! entry->prev = head
993 st %l7, [%l6 + 0x00] ! entry->next = head->next
994 st %l6, [%l7 + 0x04] ! head->next->prev = entry
995
996 st %l6, [%l4 + 0x00] ! head->next = entry
997
998 ld [%l4 + 0x18], %l3
999 inc %l3 ! sun4c_kernel_ring.num_entries++
1000 st %l3, [%l4 + 0x18]
1001 b 4f
1002 ld [%l6 + 0x08], %l5
1003
10042:
1005 or %l4, %lo(sun4c_kernel_ring), %l4
1006 ! head = &sun4c_kernel_ring.ringhd
1007
1008 ld [%l4 + 0x04], %l6 ! entry = head->prev
1009
1010 ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr
1011
1012 ! Flush segment from the cache.
1013#ifdef CONFIG_SUN4
1014 sethi %hi((128 * 1024)), %l7
1015#else
1016 sethi %hi((64 * 1024)), %l7
1017#endif
10189:
1019vac_hwflush_patch1:
1020vac_linesize_patch:
1021 subcc %l7, 16, %l7
1022 bne 9b
1023vac_hwflush_patch2:
1024 sta %g0, [%l3 + %l7] ASI_FLUSHSEG
1025
1026 st %l5, [%l6 + 0x08] ! entry->vaddr = address
1027
1028 ld [%l6 + 0x00], %l5 ! next = entry->next
1029 ld [%l6 + 0x04], %l7 ! entry->prev
1030
1031 st %l7, [%l5 + 0x04] ! next->prev = entry->prev
1032 st %l5, [%l7 + 0x00] ! entry->prev->next = next
1033 st %l4, [%l6 + 0x04] ! entry->prev = head
1034
1035 ld [%l4 + 0x00], %l7 ! head->next
1036
1037 st %l7, [%l6 + 0x00] ! entry->next = head->next
1038 st %l6, [%l7 + 0x04] ! head->next->prev = entry
1039 st %l6, [%l4 + 0x00] ! head->next = entry
1040
1041 mov %l3, %l5 ! address = tmp
1042
10434:
1044num_context_patch1:
1045 mov 0x08, %l7
1046
1047 ld [%l6 + 0x08], %l4
1048 ldub [%l6 + 0x0c], %l3
1049 or %l4, %l3, %l4 ! encode new vaddr/pseg into l4
1050
1051 sethi %hi(AC_CONTEXT), %l3
1052 lduba [%l3] ASI_CONTROL, %l6
1053
1054 /* Invalidate old mapping, instantiate new mapping,
1055 * for each context. Registers l6/l7 are live across
1056 * this loop.
1057 */
10583: deccc %l7
1059 sethi %hi(AC_CONTEXT), %l3
1060 stba %l7, [%l3] ASI_CONTROL
1061invalid_segment_patch2:
1062 mov 0x7f, %l3
1063 stXa %l3, [%l5] ASI_SEGMAP
1064 andn %l4, 0x1ff, %l3
1065 bne 3b
1066 stXa %l4, [%l3] ASI_SEGMAP
1067
1068 sethi %hi(AC_CONTEXT), %l3
1069 stba %l6, [%l3] ASI_CONTROL
1070
1071 andn %l4, 0x1ff, %l5
1072
10731:
1074 sethi %hi(VMALLOC_START), %l4
1075 cmp %l5, %l4
1076
1077 bgeu 1f
1078 mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
1079
1080 sethi %hi(KERNBASE), %l6
1081
1082 sub %l5, %l6, %l4
1083 srl %l4, PAGE_SHIFT, %l4
1084 sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1085 or %l3, %l4, %l3
1086
1087 sethi %hi(PAGE_SIZE), %l4
1088
10892:
1090 sta %l3, [%l5] ASI_PTE
1091 deccc %l7
1092 inc %l3
1093 bne 2b
1094 add %l5, %l4, %l5
1095
1096 b 7f
1097 sethi %hi(sun4c_kernel_faults), %l4
1098
10991:
1100 srl %l5, SUN4C_PGDIR_SHIFT, %l3
1101 sethi %hi(swapper_pg_dir), %l4
1102 or %l4, %lo(swapper_pg_dir), %l4
1103 sll %l3, 2, %l3
1104 ld [%l4 + %l3], %l4
1105#ifndef CONFIG_SUN4
1106 and %l4, PAGE_MASK, %l4
1107#else
1108 sethi %hi(PAGE_MASK), %l6
1109 and %l4, %l6, %l4
1110#endif
1111
1112 srl %l5, (PAGE_SHIFT - 2), %l6
1113 and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1114 add %l6, %l4, %l6
1115
1116 sethi %hi(PAGE_SIZE), %l4
1117
11182:
1119 ld [%l6], %l3
1120 deccc %l7
1121 sta %l3, [%l5] ASI_PTE
1122 add %l6, 0x4, %l6
1123 bne 2b
1124 add %l5, %l4, %l5
1125
1126 sethi %hi(sun4c_kernel_faults), %l4
11277:
1128 ld [%l4 + %lo(sun4c_kernel_faults)], %l3
1129 inc %l3
1130 st %l3, [%l4 + %lo(sun4c_kernel_faults)]
1131
1132 /* Restore condition codes */
1133 wr %l0, 0x0, %psr
1134 WRITE_PAUSE
1135 jmp %l1
1136 rett %l2
1137
1138sun4c_fault_fromuser:
1139 SAVE_ALL
1140 nop
1141
1142 mov %l7, %o1 ! Decode the info from %l7
1143 mov %l7, %o2
1144 and %o1, 1, %o1 ! arg2 = text_faultp
1145 mov %l7, %o3
1146 and %o2, 2, %o2 ! arg3 = writep
1147 andn %o3, 0xfff, %o3 ! arg4 = faulting address
1148
1149 wr %l0, PSR_ET, %psr
1150 WRITE_PAUSE
1151
1152 call do_sun4c_fault
1153 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
1154
1155 RESTORE_ALL
1156
1157 .align 4
1158 .globl srmmu_fault
1159srmmu_fault:
1160 mov 0x400, %l5
1161 mov 0x300, %l4
1162
1163 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
1164 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
1165
1166 andn %l6, 0xfff, %l6
1167 srl %l5, 6, %l5 ! and encode all info into l7
1168
1169 and %l5, 2, %l5
1170 or %l5, %l6, %l6
1171
1172 or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
1173
1174 SAVE_ALL
1175
1176 mov %l7, %o1
1177 mov %l7, %o2
1178 and %o1, 1, %o1 ! arg2 = text_faultp
1179 mov %l7, %o3
1180 and %o2, 2, %o2 ! arg3 = writep
1181 andn %o3, 0xfff, %o3 ! arg4 = faulting address
1182
1183 wr %l0, PSR_ET, %psr
1184 WRITE_PAUSE
1185
1186 call do_sparc_fault
1187 add %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
1188
1189 RESTORE_ALL
1190
1191#ifdef CONFIG_SUNOS_EMUL
1192 /* SunOS uses syscall zero as the 'indirect syscall'; it looks
1193 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
1194 * This is complete brain damage.
1195 */
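	/* In other words (hedged illustration): a SunOS program doing
	 *
	 *	syscall(0, SYS_write, fd, buf, len);
	 *
	 * must behave exactly like syscall(SYS_write, fd, buf, len), so the
	 * code below shifts the real syscall number out of %o0 and slides
	 * the remaining arguments down one register each.
	 */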
1196 .globl sunos_indir
1197sunos_indir:
1198 mov %o7, %l4
1199 cmp %o0, NR_SYSCALLS
1200 blu,a 1f
1201 sll %o0, 0x2, %o0
1202
1203 sethi %hi(sunos_nosys), %l6
1204 b 2f
1205 or %l6, %lo(sunos_nosys), %l6
1206
12071:
1208 set sunos_sys_table, %l7
1209 ld [%l7 + %o0], %l6
1210
12112:
1212 mov %o1, %o0
1213 mov %o2, %o1
1214 mov %o3, %o2
1215 mov %o4, %o3
1216 mov %o5, %o4
1217 call %l6
1218 mov %l4, %o7
1219#endif
1220
1221 .align 4
1222 .globl sys_nis_syscall
1223sys_nis_syscall:
1224 mov %o7, %l5
1225 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1226 call c_sys_nis_syscall
1227 mov %l5, %o7
1228
1229 .align 4
1230 .globl sys_ptrace
1231sys_ptrace:
1232 call do_ptrace
1233 add %sp, STACKFRAME_SZ, %o0
1234
1235 ld [%curptr + TI_FLAGS], %l5
1236 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1237 be 1f
1238 nop
1239
1240 call syscall_trace
1241 nop
1242
12431:
1244 RESTORE_ALL
1245
1246 .align 4
1247 .globl sys_execve
1248sys_execve:
1249 mov %o7, %l5
1250 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1251 call sparc_execve
1252 mov %l5, %o7
1253
1254 .align 4
1255 .globl sys_pipe
1256sys_pipe:
1257 mov %o7, %l5
1258 add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg
1259 call sparc_pipe
1260 mov %l5, %o7
1261
1262 .align 4
1263 .globl sys_sigaltstack
1264sys_sigaltstack:
1265 mov %o7, %l5
1266 mov %fp, %o2
1267 call do_sigaltstack
1268 mov %l5, %o7
1269
1270 .align 4
1271 .globl sys_sigstack
1272sys_sigstack:
1273 mov %o7, %l5
1274 mov %fp, %o2
1275 call do_sys_sigstack
1276 mov %l5, %o7
1277
1278 .align 4
1279 .globl sys_sigreturn
1280sys_sigreturn:
1281 call do_sigreturn
1282 add %sp, STACKFRAME_SZ, %o0
1283
1284 ld [%curptr + TI_FLAGS], %l5
1285 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1286 be 1f
1287 nop
1288
1289 call syscall_trace
1290 nop
1291
12921:
1293 /* We don't want to muck with user registers like a
1294 * normal syscall, just return.
1295 */
1296 RESTORE_ALL
1297
1298 .align 4
1299 .globl sys_rt_sigreturn
1300sys_rt_sigreturn:
1301 call do_rt_sigreturn
1302 add %sp, STACKFRAME_SZ, %o0
1303
1304 ld [%curptr + TI_FLAGS], %l5
1305 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1306 be 1f
1307 nop
1308
1309 call syscall_trace
1310 nop
1311
13121:
1313 /* We are returning to a signal handler. */
1314 RESTORE_ALL
1315
1316 /* Now that we have a real sys_clone, sys_fork() is
1317 * implemented in terms of it. Our _real_ implementation
1318 * of SunOS vfork() will use sys_vfork().
1319 *
1320 * XXX These three should be consolidated into mostly shared
1321 * XXX code just like on sparc64... -DaveM
1322 */
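	/* In C terms the three entry points below reduce to (hedged sketch;
	 * 0x4000 and 0x0100 in sys_vfork are CLONE_VFORK and CLONE_VM):
	 *
	 *	sys_fork:  sparc_do_fork(SIGCHLD, parent_sp, regs, 0);
	 *	sys_clone: sparc_do_fork(clone_flags,
	 *	                         new_usp ? (new_usp & ~7UL) : parent_sp,
	 *	                         regs, 0);
	 *	sys_vfork: sparc_do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
	 *	                         parent_sp, regs, 0);
	 */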
1323 .align 4
1324 .globl sys_fork, flush_patch_two
1325sys_fork:
1326 mov %o7, %l5
1327flush_patch_two:
1328 FLUSH_ALL_KERNEL_WINDOWS;
1329 ld [%curptr + TI_TASK], %o4
1330 rd %psr, %g4
1331 WRITE_PAUSE
1332 mov SIGCHLD, %o0 ! arg0: clone flags
1333 rd %wim, %g5
1334 WRITE_PAUSE
1335 mov %fp, %o1 ! arg1: usp
1336 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1337 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1338 mov 0, %o3
1339 call sparc_do_fork
1340 mov %l5, %o7
1341
1342 /* Whee, kernel threads! */
1343 .globl sys_clone, flush_patch_three
1344sys_clone:
1345 mov %o7, %l5
1346flush_patch_three:
1347 FLUSH_ALL_KERNEL_WINDOWS;
1348 ld [%curptr + TI_TASK], %o4
1349 rd %psr, %g4
1350 WRITE_PAUSE
1351
1352 /* arg0,1: flags,usp -- loaded already */
1353 cmp %o1, 0x0 ! Is new_usp NULL?
1354 rd %wim, %g5
1355 WRITE_PAUSE
1356 be,a 1f
1357 mov %fp, %o1 ! yes, use callers usp
1358 andn %o1, 7, %o1 ! no, align to 8 bytes
13591:
1360 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1361 add %sp, STACKFRAME_SZ, %o2 ! arg2: pt_regs ptr
1362 mov 0, %o3
1363 call sparc_do_fork
1364 mov %l5, %o7
1365
1366 /* Whee, real vfork! */
1367 .globl sys_vfork, flush_patch_four
1368sys_vfork:
1369flush_patch_four:
1370 FLUSH_ALL_KERNEL_WINDOWS;
1371 ld [%curptr + TI_TASK], %o4
1372 rd %psr, %g4
1373 WRITE_PAUSE
1374 rd %wim, %g5
1375 WRITE_PAUSE
1376 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1377 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0
1378 mov %fp, %o1
1379 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1380 sethi %hi(sparc_do_fork), %l1
1381 mov 0, %o3
1382 jmpl %l1 + %lo(sparc_do_fork), %g0
1383 add %sp, STACKFRAME_SZ, %o2
1384
1385 .align 4
1386linux_sparc_ni_syscall:
1387 sethi %hi(sys_ni_syscall), %l7
1388 b syscall_is_too_hard
1389 or %l7, %lo(sys_ni_syscall), %l7
1390
1391linux_fast_syscall:
1392 andn %l7, 3, %l7
1393 mov %i0, %o0
1394 mov %i1, %o1
1395 mov %i2, %o2
1396 jmpl %l7 + %g0, %g0
1397 mov %i3, %o3
1398
1399linux_syscall_trace:
1400 call syscall_trace
1401 nop
1402 mov %i0, %o0
1403 mov %i1, %o1
1404 mov %i2, %o2
1405 mov %i3, %o3
1406 b 2f
1407 mov %i4, %o4
1408
1409 .globl ret_from_fork
1410ret_from_fork:
1411 call schedule_tail
1412 mov %g3, %o0
1413 b ret_sys_call
1414 ld [%sp + STACKFRAME_SZ + PT_I0], %o0
1415
1416 /* Linux native and SunOS system calls enter here... */
1417 .align 4
1418 .globl linux_sparc_syscall
1419linux_sparc_syscall:
1420 /* Direct access to user regs, much faster. */
1421 cmp %g1, NR_SYSCALLS
1422 bgeu linux_sparc_ni_syscall
1423 sll %g1, 2, %l4
1424 ld [%l7 + %l4], %l7
1425 andcc %l7, 1, %g0
1426 bne linux_fast_syscall
1427 /* Just do first insn from SAVE_ALL in the delay slot */
1428
1429 .globl syscall_is_too_hard
1430syscall_is_too_hard:
1431 SAVE_ALL_HEAD
1432 rd %wim, %l3
1433
1434 wr %l0, PSR_ET, %psr
1435 mov %i0, %o0
1436 mov %i1, %o1
1437 mov %i2, %o2
1438
1439 ld [%curptr + TI_FLAGS], %l5
1440 mov %i3, %o3
1441 andcc %l5, _TIF_SYSCALL_TRACE, %g0
1442 mov %i4, %o4
1443 bne linux_syscall_trace
1444 mov %i0, %l5
14452:
1446 call %l7
1447 mov %i5, %o5
1448
1449 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1450
1451 .globl ret_sys_call
1452ret_sys_call:
1453 ld [%curptr + TI_FLAGS], %l6
1454 cmp %o0, -ERESTART_RESTARTBLOCK
1455 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1456 set PSR_C, %g2
1457 bgeu 1f
1458 andcc %l6, _TIF_SYSCALL_TRACE, %g0
1459
1460 /* System call success, clear Carry condition code. */
1461 andn %g3, %g2, %g3
1462 clr %l6
1463 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1464 bne linux_syscall_trace2
1465 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1466 add %l1, 0x4, %l2 /* npc = npc+4 */
1467 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1468 b ret_trap_entry
1469 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
14701:
1471 /* System call failure, set Carry condition code.
1472 * Also, get abs(errno) to return to the process.
1473 */
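	/* Hedged userspace-side illustration of this convention: the kernel
	 * hands back abs(errno) in %o0 with the PSR carry bit set, and libc
	 * undoes it roughly as
	 *
	 *	if (carry set) { errno = retval; retval = -1; }
	 */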
1474 sub %g0, %o0, %o0
1475 or %g3, %g2, %g3
1476 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1477 mov 1, %l6
1478 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1479 bne linux_syscall_trace2
1480 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1481 add %l1, 0x4, %l2 /* npc = npc+4 */
1482 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1483 b ret_trap_entry
1484 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1485
1486linux_syscall_trace2:
1487 call syscall_trace
1488 add %l1, 0x4, %l2 /* npc = npc+4 */
1489 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1490 b ret_trap_entry
1491 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1492
1493
1494 /*
1495 * Solaris system calls and indirect system calls enter here.
1496 *
1497 * I have named the solaris indirect syscalls like that because
1498 * it seems like Solaris has some fast path syscalls that can
1499 * be handled as indirect system calls. - mig
1500 */
1501
1502linux_syscall_for_solaris:
1503 sethi %hi(sys_call_table), %l7
1504 b linux_sparc_syscall
1505 or %l7, %lo(sys_call_table), %l7
1506
1507 .align 4
1508 .globl solaris_syscall
1509solaris_syscall:
1510 cmp %g1,59
1511 be linux_syscall_for_solaris
1512 cmp %g1,2
1513 be linux_syscall_for_solaris
1514 cmp %g1,42
1515 be linux_syscall_for_solaris
1516 cmp %g1,119
1517 be,a linux_syscall_for_solaris
1518 mov 2, %g1
15191:
1520 SAVE_ALL_HEAD
1521 rd %wim, %l3
1522
1523 wr %l0, PSR_ET, %psr
1524 nop
1525 nop
1526 mov %i0, %l5
1527
1528 call do_solaris_syscall
1529 add %sp, STACKFRAME_SZ, %o0
1530
1531 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1532 set PSR_C, %g2
1533 cmp %o0, -ERESTART_RESTARTBLOCK
1534 bgeu 1f
1535 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1536
1537 /* System call success, clear Carry condition code. */
1538 andn %g3, %g2, %g3
1539 clr %l6
1540 b 2f
1541 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1542
15431:
1544 /* System call failure, set Carry condition code.
1545 * Also, get abs(errno) to return to the process.
1546 */
1547 sub %g0, %o0, %o0
1548 mov 1, %l6
1549 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1550 or %g3, %g2, %g3
1551 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1552
1553 /* Advance the pc and npc over the trap instruction.
1554 * If the npc is unaligned (has its low bit set), it means
1555 * the kernel does not want us to play magic (i.e., skipping over
1556 * traps). Mainly when the Solaris code wants to set some PC and
1557 * nPC (setcontext).
1558 */
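	/* As a hedged C sketch of the fixup below:
	 *
	 *	npc = regs->npc;
	 *	if (npc & 1) {
	 *		regs->npc = npc - 1;	-- explicit target, leave pc alone
	 *	} else {
	 *		regs->pc  = npc;	-- skip over the trap instruction
	 *		regs->npc = npc + 4;
	 *	}
	 */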
15592:
1560 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1561 andcc %l1, 1, %g0
1562 bne 1f
1563 add %l1, 0x4, %l2 /* npc = npc+4 */
1564 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1565 b ret_trap_entry
1566 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1567
1568 /* kernel knows what it is doing, fixup npc and continue */
15691:
1570 sub %l1, 1, %l1
1571 b ret_trap_entry
1572 st %l1, [%sp + STACKFRAME_SZ + PT_NPC]
1573
1574#ifndef CONFIG_SUNOS_EMUL
1575 .align 4
1576 .globl sunos_syscall
1577sunos_syscall:
1578 SAVE_ALL_HEAD
1579 rd %wim, %l3
1580 wr %l0, PSR_ET, %psr
1581 nop
1582 nop
1583 mov %i0, %l5
1584 call do_sunos_syscall
1585 add %sp, STACKFRAME_SZ, %o0
1586#endif
1587
1588 /* {net, open}bsd system calls enter here... */
1589 .align 4
1590 .globl bsd_syscall
1591bsd_syscall:
1592 /* Direct access to user regs, much faster. */
1593 cmp %g1, NR_SYSCALLS
1594 blu,a 1f
1595 sll %g1, 2, %l4
1596
1597 set sys_ni_syscall, %l7
1598 b bsd_is_too_hard
1599 nop
1600
16011:
1602 ld [%l7 + %l4], %l7
1603
1604 .globl bsd_is_too_hard
1605bsd_is_too_hard:
1606 rd %wim, %l3
1607 SAVE_ALL
1608
1609 wr %l0, PSR_ET, %psr
1610 WRITE_PAUSE
1611
16122:
1613 mov %i0, %o0
1614 mov %i1, %o1
1615 mov %i2, %o2
1616 mov %i0, %l5
1617 mov %i3, %o3
1618 mov %i4, %o4
1619 call %l7
1620 mov %i5, %o5
1621
1622 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1623 set PSR_C, %g2
1624 cmp %o0, -ERESTART_RESTARTBLOCK
1625 bgeu 1f
1626 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3
1627
1628 /* System call success, clear Carry condition code. */
1629 andn %g3, %g2, %g3
1630 clr %l6
1631 b 2f
1632 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1633
16341:
1635 /* System call failure, set Carry condition code.
1636 * Also, get abs(errno) to return to the process.
1637 */
1638 sub %g0, %o0, %o0
1639#if 0 /* XXX todo XXX */
1640 sethi %hi(bsd_xlatb_rorl), %o3
1641 or %o3, %lo(bsd_xlatb_rorl), %o3
1642 sll %o0, 2, %o0
1643 ld [%o3 + %o0], %o0
1644#endif
1645 mov 1, %l6
1646 st %o0, [%sp + STACKFRAME_SZ + PT_I0]
1647 or %g3, %g2, %g3
1648 st %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1649
1650 /* Advance the pc and npc over the trap instruction. */
16512:
1652 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1653 add %l1, 0x4, %l2 /* npc = npc+4 */
1654 st %l1, [%sp + STACKFRAME_SZ + PT_PC]
1655 b ret_trap_entry
1656 st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1657
1658/* Saving and restoring the FPU state is best done from lowlevel code.
1659 *
1660 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1661 * void *fpqueue, unsigned long *fpqdepth)
1662 */
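	/* Hedged caller-side usage sketch (buffer sizes are illustrative
	 * only, not authoritative):
	 *
	 *	unsigned long fpregs[32], fsr, fpqdepth;
	 *	unsigned long fpqueue[2 * 16];	-- %fq entries are {addr, insn} pairs
	 *
	 *	fpsave(fpregs, &fsr, fpqueue, &fpqdepth);
	 *	...
	 *	fpload(fpregs, &fsr);
	 */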
1663
1664 .globl fpsave
1665fpsave:
1666 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
1667 ld [%o1], %g1
1668 set 0x2000, %g4
1669 andcc %g1, %g4, %g0
1670 be 2f
1671 mov 0, %g2
1672
1673 /* We have an fpqueue to save. */
16741:
1675 std %fq, [%o2]
1676fpsave_magic:
1677 st %fsr, [%o1]
1678 ld [%o1], %g3
1679 andcc %g3, %g4, %g0
1680 add %g2, 1, %g2
1681 bne 1b
1682 add %o2, 8, %o2
1683
16842:
1685 st %g2, [%o3]
1686
1687 std %f0, [%o0 + 0x00]
1688 std %f2, [%o0 + 0x08]
1689 std %f4, [%o0 + 0x10]
1690 std %f6, [%o0 + 0x18]
1691 std %f8, [%o0 + 0x20]
1692 std %f10, [%o0 + 0x28]
1693 std %f12, [%o0 + 0x30]
1694 std %f14, [%o0 + 0x38]
1695 std %f16, [%o0 + 0x40]
1696 std %f18, [%o0 + 0x48]
1697 std %f20, [%o0 + 0x50]
1698 std %f22, [%o0 + 0x58]
1699 std %f24, [%o0 + 0x60]
1700 std %f26, [%o0 + 0x68]
1701 std %f28, [%o0 + 0x70]
1702 retl
1703 std %f30, [%o0 + 0x78]
1704
1705 /* Thanks to Theo de Raadt and the authors of the Sprite/NetBSD/OpenBSD
1706 * code for pointing out this possible deadlock: while we save state
1707 * above, we could trap on the %fsr store, so our low-level FPU trap
1708 * code has to know how to deal with this.
1709 */
1710fpsave_catch:
1711 b fpsave_magic + 4
1712 st %fsr, [%o1]
1713
1714fpsave_catch2:
1715 b fpsave + 4
1716 st %fsr, [%o1]
1717
1718 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1719
1720 .globl fpload
1721fpload:
1722 ldd [%o0 + 0x00], %f0
1723 ldd [%o0 + 0x08], %f2
1724 ldd [%o0 + 0x10], %f4
1725 ldd [%o0 + 0x18], %f6
1726 ldd [%o0 + 0x20], %f8
1727 ldd [%o0 + 0x28], %f10
1728 ldd [%o0 + 0x30], %f12
1729 ldd [%o0 + 0x38], %f14
1730 ldd [%o0 + 0x40], %f16
1731 ldd [%o0 + 0x48], %f18
1732 ldd [%o0 + 0x50], %f20
1733 ldd [%o0 + 0x58], %f22
1734 ldd [%o0 + 0x60], %f24
1735 ldd [%o0 + 0x68], %f26
1736 ldd [%o0 + 0x70], %f28
1737 ldd [%o0 + 0x78], %f30
1738 ld [%o1], %fsr
1739 retl
1740 nop
1741
1742 /* __ndelay and __udelay take two arguments:
1743 * 0 - nsecs or usecs to delay
1744 * 1 - per_cpu udelay_val (loops per jiffy)
1745 *
1746 * Note that ndelay gives HZ times higher resolution but has a 10ms
1747 * limit. udelay can handle up to 1s.
1748 */
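	/* Worked constants (hedged; HZ = 100 assumed for the ndelay figure):
	 *
	 *	0x10c6 = 4294 ~= 2^32 / 1 000 000		-- usec scaling
	 *	0x1ad  =  429 ~= 2^32 / (1 000 000 000 / HZ)	-- nsec scaling
	 *
	 * so ndelay spins for roughly (nsecs * 0x1ad * udelay_val) >> 32
	 * loops, while udelay computes (usecs * 0x10c6 * udelay_val) >> 32
	 * and multiplies by HZ afterwards, building the 64-bit products with
	 * the .umul millicode routine.
	 */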
1749 .globl __ndelay
1750__ndelay:
1751 save %sp, -STACKFRAME_SZ, %sp
1752 mov %i0, %o0
1753 call .umul
1754 mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ)
1755 call .umul
1756 mov %i1, %o1 ! udelay_val
1757 ba delay_continue
1758 mov %o1, %o0 ! >>32 later for better resolution
1759
1760 .globl __udelay
1761__udelay:
1762 save %sp, -STACKFRAME_SZ, %sp
1763 mov %i0, %o0
1764 sethi %hi(0x10c6), %o1
1765 call .umul
1766 or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000
1767 call .umul
1768 mov %i1, %o1 ! udelay_val
1769 call .umul
1770 mov HZ, %o0 ! >>32 earlier for wider range
1771
1772delay_continue:
1773 cmp %o0, 0x0
17741:
1775 bne 1b
1776 subcc %o0, 1, %o0
1777
1778 ret
1779 restore
1780
1781 /* Handle a software breakpoint */
1782 /* We have to inform parent that child has stopped */
1783 .align 4
1784 .globl breakpoint_trap
1785breakpoint_trap:
1786 rd %wim,%l3
1787 SAVE_ALL
1788 wr %l0, PSR_ET, %psr
1789 WRITE_PAUSE
1790
1791 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1792 call sparc_breakpoint
1793 add %sp, STACKFRAME_SZ, %o0
1794
1795 RESTORE_ALL
1796
1797 .align 4
1798 .globl __handle_exception, flush_patch_exception
1799__handle_exception:
1800flush_patch_exception:
1801 FLUSH_ALL_KERNEL_WINDOWS;
1802 ldd [%o0], %o6
1803 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
1804 mov 1, %g1 ! signal EFAULT condition
1805
1806 .align 4
1807 .globl kill_user_windows, kuw_patch1_7win
1808 .globl kuw_patch1
1809kuw_patch1_7win: sll %o3, 6, %o3
1810
1811 /* No matter how much overhead this routine has in the worst
1812 * case scenario, it is several times better than taking the
1813 * traps with the old method of just doing flush_user_windows().
1814 */
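	/* Each iteration of the loop below simulates one SAVE in software
	 * (hedged sketch; NWINDOWS is 8 here, with kuw_patch1_7win applied
	 * on 7-window CPUs):
	 *
	 *	next_wim = (wim >> 1) | (wim << (NWINDOWS - 1));  -- rotate right
	 *	umask &= ~next_wim;				  -- window handled
	 *
	 * until no user-window bits remain, then the new %wim is installed.
	 */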
1815kill_user_windows:
1816 ld [%g6 + TI_UWINMASK], %o0 ! get current umask
1817 orcc %g0, %o0, %g0 ! if no bits set, we are done
1818 be 3f ! nothing to do
1819 rd %psr, %o5 ! must clear interrupts
1820 or %o5, PSR_PIL, %o4 ! or else that could change
1821 wr %o4, 0x0, %psr ! the uwinmask state
1822 WRITE_PAUSE ! burn them cycles
18231:
1824 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state
1825 orcc %g0, %o0, %g0 ! did an interrupt come in?
1826 be 4f ! yep, we are done
1827 rd %wim, %o3 ! get current wim
1828 srl %o3, 1, %o4 ! simulate a save
1829kuw_patch1:
1830 sll %o3, 7, %o3 ! compute next wim
1831 or %o4, %o3, %o3 ! result
1832 andncc %o0, %o3, %o0 ! clean this bit in umask
1833 bne kuw_patch1 ! not done yet
1834 srl %o3, 1, %o4 ! begin another save simulation
1835 wr %o3, 0x0, %wim ! set the new wim
1836 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask
18374:
1838 wr %o5, 0x0, %psr ! re-enable interrupts
1839 WRITE_PAUSE ! burn baby burn
18403:
1841 retl ! return
1842 st %g0, [%g6 + TI_W_SAVED] ! no windows saved
1843
1844 .align 4
1845 .globl restore_current
1846restore_current:
1847 LOAD_CURRENT(g6, o0)
1848 retl
1849 nop
1850
1851#ifdef CONFIG_PCI
1852#include <asm/pcic.h>
1853
1854 .align 4
1855 .globl linux_trap_ipi15_pcic
1856linux_trap_ipi15_pcic:
1857 rd %wim, %l3
1858 SAVE_ALL
1859
1860 /*
1861 * First deactivate NMI
1862 * or we cannot drop ET, cannot get window spill traps.
1863 * The busy loop is necessary because the PIO error
1864 * sometimes does not go away quickly and we trap again.
1865 */
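	/* Roughly (hedged sketch, using the PCIC register offsets referenced
	 * in the code below):
	 *
	 *	pending = readl(pcic + PCI_SYS_INT_PENDING);	-- kept for printouts
	 *	writeb(PCI_SYS_INT_PENDING_CLEAR_ALL,
	 *	       pcic + PCI_SYS_INT_PENDING_CLEAR);
	 *	while (readl(pcic + PCI_SYS_INT_PENDING) &
	 *	       ((PCI_SYS_INT_PENDING_PIO | PCI_SYS_INT_PENDING_PCI) >> 24))
	 *		;				-- wait for the PIO error to clear
	 */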
1866 sethi %hi(pcic_regs), %o1
1867 ld [%o1 + %lo(pcic_regs)], %o2
1868
1869 ! Get pending status for printouts later.
1870 ld [%o2 + PCI_SYS_INT_PENDING], %o0
1871
1872 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1873 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
18741:
1875 ld [%o2 + PCI_SYS_INT_PENDING], %o1
1876 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1877 bne 1b
1878 nop
1879
1880 or %l0, PSR_PIL, %l4
1881 wr %l4, 0x0, %psr
1882 WRITE_PAUSE
1883 wr %l4, PSR_ET, %psr
1884 WRITE_PAUSE
1885
1886 call pcic_nmi
1887 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1888 RESTORE_ALL
1889
1890 .globl pcic_nmi_trap_patch
1891pcic_nmi_trap_patch:
1892 sethi %hi(linux_trap_ipi15_pcic), %l3
1893 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0
1894 rd %psr, %l0
1895 .word 0
1896
1897#endif /* CONFIG_PCI */
1898
1899/* End of entry.S */