blob: b061d98a3218f2ea7daf979c67dd26cee14dc055 [file] [log] [blame]
Michal Simekca545022009-05-26 16:30:21 +02001/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
Michal Simek11d51362009-12-07 08:21:34 +010034#undef DEBUG
35
Michal Simekca545022009-05-26 16:30:21 +020036/* The size of a state save frame. */
37#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
38
39/* The offset of the struct pt_regs in a `state save frame' on the stack. */
40#define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
41
42#define C_ENTRY(name) .globl name; .align 4; name
43
44/*
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
48 */
49#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
50 .macro clear_bip
51 msrclr r11, MSR_BIP
52 nop
53 .endm
54
55 .macro set_bip
56 msrset r11, MSR_BIP
57 nop
58 .endm
59
60 .macro clear_eip
61 msrclr r11, MSR_EIP
62 nop
63 .endm
64
65 .macro set_ee
66 msrset r11, MSR_EE
67 nop
68 .endm
69
70 .macro disable_irq
71 msrclr r11, MSR_IE
72 nop
73 .endm
74
75 .macro enable_irq
76 msrset r11, MSR_IE
77 nop
78 .endm
79
80 .macro set_ums
81 msrset r11, MSR_UMS
82 nop
83 msrclr r11, MSR_VMS
84 nop
85 .endm
86
87 .macro set_vms
88 msrclr r11, MSR_UMS
89 nop
90 msrset r11, MSR_VMS
91 nop
92 .endm
93
94 .macro clear_vms_ums
95 msrclr r11, MSR_VMS
96 nop
97 msrclr r11, MSR_UMS
98 nop
99 .endm
100#else
101 .macro clear_bip
102 mfs r11, rmsr
103 nop
104 andi r11, r11, ~MSR_BIP
105 mts rmsr, r11
106 nop
107 .endm
108
109 .macro set_bip
110 mfs r11, rmsr
111 nop
112 ori r11, r11, MSR_BIP
113 mts rmsr, r11
114 nop
115 .endm
116
117 .macro clear_eip
118 mfs r11, rmsr
119 nop
120 andi r11, r11, ~MSR_EIP
121 mts rmsr, r11
122 nop
123 .endm
124
125 .macro set_ee
126 mfs r11, rmsr
127 nop
128 ori r11, r11, MSR_EE
129 mts rmsr, r11
130 nop
131 .endm
132
133 .macro disable_irq
134 mfs r11, rmsr
135 nop
136 andi r11, r11, ~MSR_IE
137 mts rmsr, r11
138 nop
139 .endm
140
141 .macro enable_irq
142 mfs r11, rmsr
143 nop
144 ori r11, r11, MSR_IE
145 mts rmsr, r11
146 nop
147 .endm
148
149 .macro set_ums
150 mfs r11, rmsr
151 nop
152 ori r11, r11, MSR_VMS
153 andni r11, r11, MSR_UMS
154 mts rmsr, r11
155 nop
156 .endm
157
158 .macro set_vms
159 mfs r11, rmsr
160 nop
161 ori r11, r11, MSR_VMS
162 andni r11, r11, MSR_UMS
163 mts rmsr, r11
164 nop
165 .endm
166
167 .macro clear_vms_ums
168 mfs r11, rmsr
169 nop
170 andni r11, r11, (MSR_VMS|MSR_UMS)
171 mts rmsr,r11
172 nop
173 .endm
174#endif
175
176/* Define how to call high-level functions. With MMU, virtual mode must be
177 * enabled when calling the high-level function. Clobbers R11.
178 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 */
180
181/* turn on virtual protected mode save */
182#define VM_ON \
183 set_ums; \
184 rted r0, 2f; \
1852: nop;
186
187/* turn off virtual protected mode save and user mode save*/
188#define VM_OFF \
189 clear_vms_ums; \
190 rted r0, TOPHYS(1f); \
1911: nop;
192
193#define SAVE_REGS \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r5, r1, PTO+PT_R5; \
196 swi r6, r1, PTO+PT_R6; \
197 swi r7, r1, PTO+PT_R7; \
198 swi r8, r1, PTO+PT_R8; \
199 swi r9, r1, PTO+PT_R9; \
200 swi r10, r1, PTO+PT_R10; \
201 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
202 swi r12, r1, PTO+PT_R12; \
203 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
204 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
205 swi r15, r1, PTO+PT_R15; /* Save LP */ \
206 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
207 swi r19, r1, PTO+PT_R19; \
208 swi r20, r1, PTO+PT_R20; \
209 swi r21, r1, PTO+PT_R21; \
210 swi r22, r1, PTO+PT_R22; \
211 swi r23, r1, PTO+PT_R23; \
212 swi r24, r1, PTO+PT_R24; \
213 swi r25, r1, PTO+PT_R25; \
214 swi r26, r1, PTO+PT_R26; \
215 swi r27, r1, PTO+PT_R27; \
216 swi r28, r1, PTO+PT_R28; \
217 swi r29, r1, PTO+PT_R29; \
218 swi r30, r1, PTO+PT_R30; \
219 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
220 mfs r11, rmsr; /* save MSR */ \
221 nop; \
222 swi r11, r1, PTO+PT_MSR;
223
224#define RESTORE_REGS \
225 lwi r11, r1, PTO+PT_MSR; \
226 mts rmsr , r11; \
227 nop; \
228 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
229 lwi r5, r1, PTO+PT_R5; \
230 lwi r6, r1, PTO+PT_R6; \
231 lwi r7, r1, PTO+PT_R7; \
232 lwi r8, r1, PTO+PT_R8; \
233 lwi r9, r1, PTO+PT_R9; \
234 lwi r10, r1, PTO+PT_R10; \
235 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
236 lwi r12, r1, PTO+PT_R12; \
237 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
238 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
239 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
240 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
241 lwi r19, r1, PTO+PT_R19; \
242 lwi r20, r1, PTO+PT_R20; \
243 lwi r21, r1, PTO+PT_R21; \
244 lwi r22, r1, PTO+PT_R22; \
245 lwi r23, r1, PTO+PT_R23; \
246 lwi r24, r1, PTO+PT_R24; \
247 lwi r25, r1, PTO+PT_R25; \
248 lwi r26, r1, PTO+PT_R26; \
249 lwi r27, r1, PTO+PT_R27; \
250 lwi r28, r1, PTO+PT_R28; \
251 lwi r29, r1, PTO+PT_R29; \
252 lwi r30, r1, PTO+PT_R30; \
253 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
254
255.text
256
257/*
258 * User trap.
259 *
260 * System calls are handled here.
261 *
262 * Syscall protocol:
263 * Syscall number in r12, args in r5-r10
264 * Return value in r3
265 *
266 * Trap entered via brki instruction, so BIP bit is set, and interrupts
267 * are masked. This is nice, means we don't have to CLI before state save
268 */
269C_ENTRY(_user_exception):
270 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
271 addi r14, r14, 4 /* return address is 4 byte after call */
272 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
273
274 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
275 beqi r11, 1f; /* Jump ahead if coming from user */
276/* Kernel-mode state save. */
277 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
278 tophys(r1,r11);
279 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
280 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
281
282 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
283 SAVE_REGS
284
285 addi r11, r0, 1; /* Was in kernel-mode. */
286 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
287 brid 2f;
288 nop; /* Fill delay slot */
289
290/* User-mode state save. */
2911:
292 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
293 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
294 tophys(r1,r1);
295 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
296/* calculate kernel stack pointer from task struct 8k */
297 addik r1, r1, THREAD_SIZE;
298 tophys(r1,r1);
299
300 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
301 SAVE_REGS
302
303 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
304 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
305 swi r11, r1, PTO+PT_R1; /* Store user SP. */
306 addi r11, r0, 1;
307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3082: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
309 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1)
312
Michal Simekca545022009-05-26 16:30:21 +0200313/* where the trap should return need -8 to adjust for rtsd r15, 8*/
314/* Jump to the appropriate function for the system call number in r12
315 * (r12 is not preserved), or return an error if r12 is not valid. The LP
316 * register should point to the location where
317 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
Michal Simek23575482009-08-24 13:26:04 +0200318
319 # Step into virtual mode.
320 set_vms;
321 addik r11, r0, 3f
322 rtid r11, 0
323 nop
3243:
325 add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
326 lwi r11, r11, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK
329 beqi r11, 4f
330
331 addik r3, r0, -ENOSYS
332 swi r3, r1, PTO + PT_R3
333 brlid r15, do_syscall_trace_enter
334 addik r5, r1, PTO + PT_R0
335
336 # do_syscall_trace_enter returns the new syscall nr.
337 addk r12, r0, r3
338 lwi r5, r1, PTO+PT_R5;
339 lwi r6, r1, PTO+PT_R6;
340 lwi r7, r1, PTO+PT_R7;
341 lwi r8, r1, PTO+PT_R8;
342 lwi r9, r1, PTO+PT_R9;
343 lwi r10, r1, PTO+PT_R10;
3444:
345/* Jump to the appropriate function for the system call number in r12
346 * (r12 is not preserved), or return an error if r12 is not valid.
347 * The LP register should point to the location where the called function
348 * should return. [note that MAKE_SYS_CALL uses label 1] */
349 /* See if the system call number is valid */
Michal Simekca545022009-05-26 16:30:21 +0200350 addi r11, r12, -__NR_syscalls;
Michal Simek23575482009-08-24 13:26:04 +0200351 bgei r11,5f;
Michal Simekca545022009-05-26 16:30:21 +0200352 /* Figure out which function to use for this system call. */
353 /* Note Microblaze barrel shift is optional, so don't rely on it */
354 add r12, r12, r12; /* convert num -> ptr */
355 add r12, r12, r12;
356
Michal Simek11d51362009-12-07 08:21:34 +0100357#ifdef DEBUG
Michal Simekca545022009-05-26 16:30:21 +0200358 /* Trac syscalls and stored them to r0_ram */
Michal Simek23575482009-08-24 13:26:04 +0200359 lwi r3, r12, 0x400 + r0_ram
Michal Simekca545022009-05-26 16:30:21 +0200360 addi r3, r3, 1
Michal Simek23575482009-08-24 13:26:04 +0200361 swi r3, r12, 0x400 + r0_ram
Michal Simek11d51362009-12-07 08:21:34 +0100362#endif
Michal Simekca545022009-05-26 16:30:21 +0200363
Michal Simek23575482009-08-24 13:26:04 +0200364 # Find and jump into the syscall handler.
365 lwi r12, r12, sys_call_table
366 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
367 la r15, r0, ret_from_trap-8
368 bra r12
369
Michal Simekca545022009-05-26 16:30:21 +0200370 /* The syscall number is invalid, return an error. */
Michal Simek23575482009-08-24 13:26:04 +02003715:
Michal Simekca545022009-05-26 16:30:21 +0200372 addi r3, r0, -ENOSYS;
373 rtsd r15,8; /* looks like a normal subroutine return */
374 or r0, r0, r0
375
376
Michal Simek23575482009-08-24 13:26:04 +0200377/* Entry point used to return from a syscall/trap */
Michal Simekca545022009-05-26 16:30:21 +0200378/* We re-enable BIP bit before state restore */
379C_ENTRY(ret_from_trap):
380 set_bip; /* Ints masked for state restore*/
381 lwi r11, r1, PTO+PT_MODE;
382/* See if returning to kernel mode, if so, skip resched &c. */
383 bnei r11, 2f;
384
385 /* We're returning to user mode, so check for various conditions that
386 * trigger rescheduling. */
Michal Simek23575482009-08-24 13:26:04 +0200387 # FIXME: Restructure all these flag checks.
388 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
389 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK
392 beqi r11, 1f
393
394 swi r3, r1, PTO + PT_R3
395 swi r4, r1, PTO + PT_R4
396 brlid r15, do_syscall_trace_leave
397 addik r5, r1, PTO + PT_R0
398 lwi r3, r1, PTO + PT_R3
399 lwi r4, r1, PTO + PT_R4
4001:
401
402 /* We're returning to user mode, so check for various conditions that
403 * trigger rescheduling. */
Michal Simekca545022009-05-26 16:30:21 +0200404 /* Get current task ptr into r11 */
405 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
406 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
407 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
408 andi r11, r11, _TIF_NEED_RESCHED;
409 beqi r11, 5f;
410
411 swi r3, r1, PTO + PT_R3; /* store syscall result */
412 swi r4, r1, PTO + PT_R4;
413 bralid r15, schedule; /* Call scheduler */
414 nop; /* delay slot */
415 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
416 lwi r4, r1, PTO + PT_R4;
417
418 /* Maybe handle a signal */
4195: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
420 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
421 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
422 andi r11, r11, _TIF_SIGPENDING;
423 beqi r11, 1f; /* Signals to handle, handle them */
424
425 swi r3, r1, PTO + PT_R3; /* store syscall result */
426 swi r4, r1, PTO + PT_R4;
427 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
428 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
429 addi r7, r0, 1; /* Arg 3: int in_syscall */
430 bralid r15, do_signal; /* Handle any signals */
431 nop;
432 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
433 lwi r4, r1, PTO + PT_R4;
434
435/* Finally, return to user state. */
4361: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
437 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
438 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
439 VM_OFF;
440 tophys(r1,r1);
441 RESTORE_REGS;
442 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
443 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
444 bri 6f;
445
446/* Return to kernel state. */
4472: VM_OFF;
448 tophys(r1,r1);
449 RESTORE_REGS;
450 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
451 tovirt(r1,r1);
4526:
453TRAP_return: /* Make global symbol for debugging */
454 rtbd r14, 0; /* Instructions to return from an IRQ */
455 nop;
456
457
458/* These syscalls need access to the struct pt_regs on the stack, so we
459 implement them in assembly (they're basically all wrappers anyway). */
460
461C_ENTRY(sys_fork_wrapper):
462 addi r5, r0, SIGCHLD /* Arg 0: flags */
463 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
464 la r7, r1, PTO /* Arg 2: parent context */
465 add r8. r0, r0 /* Arg 3: (unused) */
466 add r9, r0, r0; /* Arg 4: (unused) */
467 add r10, r0, r0; /* Arg 5: (unused) */
468 brid do_fork /* Do real work (tail-call) */
469 nop;
470
471/* This the initial entry point for a new child thread, with an appropriate
472 stack in place that makes it look the the child is in the middle of an
473 syscall. This function is actually `returned to' from switch_thread
474 (copy_thread makes ret_from_fork the return address in each new thread's
475 saved context). */
476C_ENTRY(ret_from_fork):
477 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
478 add r3, r5, r0; /* switch_thread returns the prev task */
479 /* ( in the delay slot ) */
480 add r3, r0, r0; /* Child's fork call should return 0. */
481 brid ret_from_trap; /* Do normal trap return */
482 nop;
483
Arnd Bergmanne5135882009-06-18 19:55:30 +0200484C_ENTRY(sys_vfork):
485 brid microblaze_vfork /* Do real work (tail-call) */
Michal Simekca545022009-05-26 16:30:21 +0200486 la r5, r1, PTO
Michal Simekca545022009-05-26 16:30:21 +0200487
Arnd Bergmanne5135882009-06-18 19:55:30 +0200488C_ENTRY(sys_clone):
Michal Simekca545022009-05-26 16:30:21 +0200489 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
490 lwi r6, r1, PTO+PT_R1; /* If so, use paret's stack ptr */
4911: la r7, r1, PTO; /* Arg 2: parent context */
492 add r8, r0, r0; /* Arg 3: (unused) */
493 add r9, r0, r0; /* Arg 4: (unused) */
494 add r10, r0, r0; /* Arg 5: (unused) */
495 brid do_fork /* Do real work (tail-call) */
496 nop;
497
Arnd Bergmanne5135882009-06-18 19:55:30 +0200498C_ENTRY(sys_execve):
Michal Simekca545022009-05-26 16:30:21 +0200499 la r8, r1, PTO; /* add user context as 4th arg */
Arnd Bergmanne5135882009-06-18 19:55:30 +0200500 brid microblaze_execve; /* Do real work (tail-call).*/
Michal Simekca545022009-05-26 16:30:21 +0200501 nop;
502
Michal Simekca545022009-05-26 16:30:21 +0200503C_ENTRY(sys_rt_sigsuspend_wrapper):
504 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
505 swi r4, r1, PTO+PT_R4;
506 la r7, r1, PTO; /* add user context as 3rd arg */
507 brlid r15, sys_rt_sigsuspend; /* Do real work.*/
508 nop;
509 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
510 lwi r4, r1, PTO+PT_R4;
511 bri ret_from_trap /* fall through will not work here due to align */
512 nop;
513
Michal Simekca545022009-05-26 16:30:21 +0200514C_ENTRY(sys_rt_sigreturn_wrapper):
515 swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
516 swi r4, r1, PTO+PT_R4;
517 la r5, r1, PTO; /* add user context as 1st arg */
518 brlid r15, sys_rt_sigreturn /* Do real work */
519 nop;
520 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
521 lwi r4, r1, PTO+PT_R4;
522 bri ret_from_trap /* fall through will not work here due to align */
523 nop;
524
525/*
526 * HW EXCEPTION rutine start
527 */
528
529#define SAVE_STATE \
530 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */ \
531 set_bip; /*equalize initial state for all possible entries*/\
532 clear_eip; \
533 enable_irq; \
534 set_ee; \
535 /* See if already in kernel mode.*/ \
536 lwi r11, r0, TOPHYS(PER_CPU(KM)); \
537 beqi r11, 1f; /* Jump ahead if coming from user */\
538 /* Kernel-mode state save. */ \
539 /* Reload kernel stack-ptr. */ \
540 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
541 tophys(r1,r11); \
542 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */ \
543 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
544 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
545 /* store return registers separately because \
546 * this macros is use for others exceptions */ \
547 swi r3, r1, PTO + PT_R3; \
548 swi r4, r1, PTO + PT_R4; \
549 SAVE_REGS \
550 /* PC, before IRQ/trap - this is one instruction above */ \
551 swi r17, r1, PTO+PT_PC; \
552 \
553 addi r11, r0, 1; /* Was in kernel-mode. */ \
554 swi r11, r1, PTO+PT_MODE; \
555 brid 2f; \
556 nop; /* Fill delay slot */ \
5571: /* User-mode state save. */ \
558 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
559 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
560 tophys(r1,r1); \
561 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
562 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
563 tophys(r1,r1); \
564 \
565 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
566 /* store return registers separately because this macros \
567 * is use for others exceptions */ \
568 swi r3, r1, PTO + PT_R3; \
569 swi r4, r1, PTO + PT_R4; \
570 SAVE_REGS \
571 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
572 swi r17, r1, PTO+PT_PC; \
573 \
574 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
575 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
576 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
577 addi r11, r0, 1; \
578 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5792: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
580 /* Save away the syscall number. */ \
581 swi r0, r1, PTO+PT_R0; \
582 tovirt(r1,r1)
583
584C_ENTRY(full_exception_trap):
585 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
586 /* adjust exception address for privileged instruction
587 * for finding where is it */
588 addik r17, r17, -4
589 SAVE_STATE /* Save registers */
590 /* FIXME this can be store directly in PT_ESR reg.
591 * I tested it but there is a fault */
592 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
593 la r15, r0, ret_from_exc - 8
594 la r5, r1, PTO /* parameter struct pt_regs * regs */
595 mfs r6, resr
596 nop
597 mfs r7, rfsr; /* save FSR */
598 nop
Michal Simek131e4e92009-09-28 08:50:53 +0200599 mts rfsr, r0; /* Clear sticky fsr */
600 nop
Michal Simekca545022009-05-26 16:30:21 +0200601 la r12, r0, full_exception
602 set_vms;
603 rtbd r12, 0;
604 nop;
605
606/*
607 * Unaligned data trap.
608 *
609 * Unaligned data trap last on 4k page is handled here.
610 *
611 * Trap entered via exception, so EE bit is set, and interrupts
612 * are masked. This is nice, means we don't have to CLI before state save
613 *
614 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
615 */
616C_ENTRY(unaligned_data_trap):
617 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
618 SAVE_STATE /* Save registers.*/
619 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
620 la r15, r0, ret_from_exc-8
621 mfs r3, resr /* ESR */
622 nop
623 mfs r4, rear /* EAR */
624 nop
625 la r7, r1, PTO /* parameter struct pt_regs * regs */
626 la r12, r0, _unaligned_data_exception
627 set_vms;
628 rtbd r12, 0; /* interrupts enabled */
629 nop;
630
631/*
632 * Page fault traps.
633 *
634 * If the real exception handler (from hw_exception_handler.S) didn't find
635 * the mapping for the process, then we're thrown here to handle such situation.
636 *
637 * Trap entered via exceptions, so EE bit is set, and interrupts
638 * are masked. This is nice, means we don't have to CLI before state save
639 *
640 * Build a standard exception frame for TLB Access errors. All TLB exceptions
641 * will bail out to this point if they can't resolve the lightweight TLB fault.
642 *
643 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
644 * void do_page_fault(struct pt_regs *regs,
645 * unsigned long address,
646 * unsigned long error_code)
647 */
648/* data and intruction trap - which is choose is resolved int fault.c */
649C_ENTRY(page_fault_data_trap):
650 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
651 SAVE_STATE /* Save registers.*/
652 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
653 la r15, r0, ret_from_exc-8
654 la r5, r1, PTO /* parameter struct pt_regs * regs */
655 mfs r6, rear /* parameter unsigned long address */
656 nop
657 mfs r7, resr /* parameter unsigned long error_code */
658 nop
659 la r12, r0, do_page_fault
660 set_vms;
661 rtbd r12, 0; /* interrupts enabled */
662 nop;
663
664C_ENTRY(page_fault_instr_trap):
665 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
666 SAVE_STATE /* Save registers.*/
667 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
668 la r15, r0, ret_from_exc-8
669 la r5, r1, PTO /* parameter struct pt_regs * regs */
670 mfs r6, rear /* parameter unsigned long address */
671 nop
672 ori r7, r0, 0 /* parameter unsigned long error_code */
673 la r12, r0, do_page_fault
674 set_vms;
675 rtbd r12, 0; /* interrupts enabled */
676 nop;
677
678/* Entry point used to return from an exception. */
679C_ENTRY(ret_from_exc):
680 set_bip; /* Ints masked for state restore*/
681 lwi r11, r1, PTO+PT_MODE;
682 bnei r11, 2f; /* See if returning to kernel mode, */
683 /* ... if so, skip resched &c. */
684
685 /* We're returning to user mode, so check for various conditions that
686 trigger rescheduling. */
687 /* Get current task ptr into r11 */
688 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
689 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
690 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
691 andi r11, r11, _TIF_NEED_RESCHED;
692 beqi r11, 5f;
693
694/* Call the scheduler before returning from a syscall/trap. */
695 bralid r15, schedule; /* Call scheduler */
696 nop; /* delay slot */
697
698 /* Maybe handle a signal */
6995: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
700 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
701 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
702 andi r11, r11, _TIF_SIGPENDING;
703 beqi r11, 1f; /* Signals to handle, handle them */
704
705 /*
706 * Handle a signal return; Pending signals should be in r18.
707 *
708 * Not all registers are saved by the normal trap/interrupt entry
709 * points (for instance, call-saved registers (because the normal
710 * C-compiler calling sequence in the kernel makes sure they're
711 * preserved), and call-clobbered registers in the case of
712 * traps), but signal handlers may want to examine or change the
713 * complete register state. Here we save anything not saved by
714 * the normal entry sequence, so that it may be safely restored
715 * (in a possibly modified form) after do_signal returns.
716 * store return registers separately because this macros is use
717 * for others exceptions */
Michal Simekca545022009-05-26 16:30:21 +0200718 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
719 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
720 addi r7, r0, 0; /* Arg 3: int in_syscall */
721 bralid r15, do_signal; /* Handle any signals */
722 nop;
Michal Simekca545022009-05-26 16:30:21 +0200723
724/* Finally, return to user state. */
7251: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
726 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
727 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
728 VM_OFF;
729 tophys(r1,r1);
730
731 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
732 lwi r4, r1, PTO+PT_R4;
733 RESTORE_REGS;
734 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
735
736 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
737 bri 6f;
738/* Return to kernel state. */
7392: VM_OFF;
740 tophys(r1,r1);
741 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
742 lwi r4, r1, PTO+PT_R4;
743 RESTORE_REGS;
744 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
745
746 tovirt(r1,r1);
7476:
748EXC_return: /* Make global symbol for debugging */
749 rtbd r14, 0; /* Instructions to return from an IRQ */
750 nop;
751
752/*
753 * HW EXCEPTION rutine end
754 */
755
756/*
757 * Hardware maskable interrupts.
758 *
759 * The stack-pointer (r1) should have already been saved to the memory
760 * location PER_CPU(ENTRY_SP).
761 */
762C_ENTRY(_interrupt):
763/* MS: we are in physical address */
764/* Save registers, switch to proper stack, convert SP to virtual.*/
765 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
766 swi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
767 /* MS: See if already in kernel mode. */
768 lwi r11, r0, TOPHYS(PER_CPU(KM));
769 beqi r11, 1f; /* MS: Jump ahead if coming from user */
770
771/* Kernel-mode state save. */
772 or r11, r1, r0
773 tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
774/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
775 swi r11, r1, (PT_R1 - PT_SIZE);
776/* MS: restore r11 because of saving in SAVE_REGS */
777 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
778 /* save registers */
779/* MS: Make room on the stack -> activation record */
780 addik r1, r1, -STATE_SAVE_SIZE;
781/* MS: store return registers separately because
782 * this macros is use for others exceptions */
783 swi r3, r1, PTO + PT_R3;
784 swi r4, r1, PTO + PT_R4;
785 SAVE_REGS
786 /* MS: store mode */
787 addi r11, r0, 1; /* MS: Was in kernel-mode. */
788 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
789 brid 2f;
790 nop; /* MS: Fill delay slot */
791
7921:
793/* User-mode state save. */
794/* MS: restore r11 -> FIXME move before SAVE_REG */
795 lwi r11, r0, TOPHYS(PER_CPU(R11_SAVE));
796 /* MS: get the saved current */
797 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
798 tophys(r1,r1);
799 lwi r1, r1, TS_THREAD_INFO;
800 addik r1, r1, THREAD_SIZE;
801 tophys(r1,r1);
802 /* save registers */
803 addik r1, r1, -STATE_SAVE_SIZE;
804 swi r3, r1, PTO+PT_R3;
805 swi r4, r1, PTO+PT_R4;
806 SAVE_REGS
807 /* calculate mode */
808 swi r0, r1, PTO + PT_MODE;
809 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
810 swi r11, r1, PTO+PT_R1;
811 /* setup kernel mode to KM */
812 addi r11, r0, 1;
813 swi r11, r0, TOPHYS(PER_CPU(KM));
814
8152:
816 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
817 swi r0, r1, PTO + PT_R0;
818 tovirt(r1,r1)
819 la r5, r1, PTO;
820 set_vms;
821 la r11, r0, do_IRQ;
822 la r15, r0, irq_call;
823irq_call:rtbd r11, 0;
824 nop;
825
826/* MS: we are in virtual mode */
827ret_from_irq:
828 lwi r11, r1, PTO + PT_MODE;
829 bnei r11, 2f;
830
831 add r11, r0, CURRENT_TASK;
832 lwi r11, r11, TS_THREAD_INFO;
833 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
834 andi r11, r11, _TIF_NEED_RESCHED;
835 beqi r11, 5f
836 bralid r15, schedule;
837 nop; /* delay slot */
838
839 /* Maybe handle a signal */
8405: add r11, r0, CURRENT_TASK;
841 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
842 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
843 andi r11, r11, _TIF_SIGPENDING;
844 beqid r11, no_intr_resched
845/* Handle a signal return; Pending signals should be in r18. */
846 addi r7, r0, 0; /* Arg 3: int in_syscall */
847 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
848 bralid r15, do_signal; /* Handle any signals */
849 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
850
851/* Finally, return to user state. */
852no_intr_resched:
853 /* Disable interrupts, we are now committed to the state restore */
854 disable_irq
855 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
856 add r11, r0, CURRENT_TASK;
857 swi r11, r0, PER_CPU(CURRENT_SAVE);
858 VM_OFF;
859 tophys(r1,r1);
860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
861 lwi r4, r1, PTO + PT_R4;
862 RESTORE_REGS
863 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
864 lwi r1, r1, PT_R1 - PT_SIZE;
865 bri 6f;
866/* MS: Return to kernel state. */
8672: VM_OFF /* MS: turn off MMU */
868 tophys(r1,r1)
869 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
870 lwi r4, r1, PTO + PT_R4;
871 RESTORE_REGS
872 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
873 tovirt(r1,r1);
8746:
875IRQ_return: /* MS: Make global symbol for debugging */
876 rtid r14, 0
877 nop
878
879/*
880 * `Debug' trap
881 * We enter dbtrap in "BIP" (breakpoint) mode.
882 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
883 * original dbtrap.
884 * however, wait to save state first
885 */
886C_ENTRY(_debug_exception):
887 /* BIP bit is set on entry, no interrupts can occur */
888 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
889
890 swi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
891 set_bip; /*equalize initial state for all possible entries*/
892 clear_eip;
893 enable_irq;
894 lwi r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
895 beqi r11, 1f; /* Jump ahead if coming from user */
896 /* Kernel-mode state save. */
897 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
898 tophys(r1,r11);
899 swi r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
900 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
901
902 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
903 swi r3, r1, PTO + PT_R3;
904 swi r4, r1, PTO + PT_R4;
905 SAVE_REGS;
906
907 addi r11, r0, 1; /* Was in kernel-mode. */
908 swi r11, r1, PTO + PT_MODE;
909 brid 2f;
910 nop; /* Fill delay slot */
9111: /* User-mode state save. */
912 lwi r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
913 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
914 tophys(r1,r1);
915 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
916 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
917 tophys(r1,r1);
918
919 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
920 swi r3, r1, PTO + PT_R3;
921 swi r4, r1, PTO + PT_R4;
922 SAVE_REGS;
923
924 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
925 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
926 swi r11, r1, PTO+PT_R1; /* Store user SP. */
927 addi r11, r0, 1;
928 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9292: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
930 /* Save away the syscall number. */
931 swi r0, r1, PTO+PT_R0;
932 tovirt(r1,r1)
933
934 addi r5, r0, SIGTRAP /* send the trap signal */
935 add r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
936 addk r7, r0, r0 /* 3rd param zero */
937
938 set_vms;
939 la r11, r0, send_sig;
940 la r15, r0, dbtrap_call;
941dbtrap_call: rtbd r11, 0;
942 nop;
943
944 set_bip; /* Ints masked for state restore*/
945 lwi r11, r1, PTO+PT_MODE;
946 bnei r11, 2f;
947
948 /* Get current task ptr into r11 */
949 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
950 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
951 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
952 andi r11, r11, _TIF_NEED_RESCHED;
953 beqi r11, 5f;
954
955/* Call the scheduler before returning from a syscall/trap. */
956
957 bralid r15, schedule; /* Call scheduler */
958 nop; /* delay slot */
959 /* XXX Is PT_DTRACE handling needed here? */
960 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
961
962 /* Maybe handle a signal */
9635: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
964 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
965 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
966 andi r11, r11, _TIF_SIGPENDING;
967 beqi r11, 1f; /* Signals to handle, handle them */
968
969/* Handle a signal return; Pending signals should be in r18. */
970 /* Not all registers are saved by the normal trap/interrupt entry
971 points (for instance, call-saved registers (because the normal
972 C-compiler calling sequence in the kernel makes sure they're
973 preserved), and call-clobbered registers in the case of
974 traps), but signal handlers may want to examine or change the
975 complete register state. Here we save anything not saved by
976 the normal entry sequence, so that it may be safely restored
977 (in a possibly modified form) after do_signal returns. */
978
979 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
980 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
981 addi r7, r0, 0; /* Arg 3: int in_syscall */
982 bralid r15, do_signal; /* Handle any signals */
983 nop;
984
985
986/* Finally, return to user state. */
9871: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
988 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
989 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
990 VM_OFF;
991 tophys(r1,r1);
992
993 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
994 lwi r4, r1, PTO+PT_R4;
995 RESTORE_REGS
996 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
997
998
999 lwi r1, r1, PT_R1 - PT_SIZE;
1000 /* Restore user stack pointer. */
1001 bri 6f;
1002
1003/* Return to kernel state. */
10042: VM_OFF;
1005 tophys(r1,r1);
1006 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
1007 lwi r4, r1, PTO+PT_R4;
1008 RESTORE_REGS
1009 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
1010
1011 tovirt(r1,r1);
10126:
1013DBTRAP_return: /* Make global symbol for debugging */
1014 rtbd r14, 0; /* Instructions to return from an IRQ */
1015 nop;
1016
1017
1018
1019ENTRY(_switch_to)
1020 /* prepare return value */
1021 addk r3, r0, r31
1022
1023 /* save registers in cpu_context */
1024 /* use r11 and r12, volatile registers, as temp register */
1025 /* give start of cpu_context for previous process */
1026 addik r11, r5, TI_CPU_CONTEXT
1027 swi r1, r11, CC_R1
1028 swi r2, r11, CC_R2
1029 /* skip volatile registers.
1030 * they are saved on stack when we jumped to _switch_to() */
1031 /* dedicated registers */
1032 swi r13, r11, CC_R13
1033 swi r14, r11, CC_R14
1034 swi r15, r11, CC_R15
1035 swi r16, r11, CC_R16
1036 swi r17, r11, CC_R17
1037 swi r18, r11, CC_R18
1038 /* save non-volatile registers */
1039 swi r19, r11, CC_R19
1040 swi r20, r11, CC_R20
1041 swi r21, r11, CC_R21
1042 swi r22, r11, CC_R22
1043 swi r23, r11, CC_R23
1044 swi r24, r11, CC_R24
1045 swi r25, r11, CC_R25
1046 swi r26, r11, CC_R26
1047 swi r27, r11, CC_R27
1048 swi r28, r11, CC_R28
1049 swi r29, r11, CC_R29
1050 swi r30, r11, CC_R30
1051 /* special purpose registers */
1052 mfs r12, rmsr
1053 nop
1054 swi r12, r11, CC_MSR
1055 mfs r12, rear
1056 nop
1057 swi r12, r11, CC_EAR
1058 mfs r12, resr
1059 nop
1060 swi r12, r11, CC_ESR
1061 mfs r12, rfsr
1062 nop
1063 swi r12, r11, CC_FSR
1064
1065 /* update r31, the current */
1066 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
1067 /* stored it to current_save too */
1068 swi r31, r0, PER_CPU(CURRENT_SAVE)
1069
1070 /* get new process' cpu context and restore */
1071 /* give me start where start context of next task */
1072 addik r11, r6, TI_CPU_CONTEXT
1073
1074 /* non-volatile registers */
1075 lwi r30, r11, CC_R30
1076 lwi r29, r11, CC_R29
1077 lwi r28, r11, CC_R28
1078 lwi r27, r11, CC_R27
1079 lwi r26, r11, CC_R26
1080 lwi r25, r11, CC_R25
1081 lwi r24, r11, CC_R24
1082 lwi r23, r11, CC_R23
1083 lwi r22, r11, CC_R22
1084 lwi r21, r11, CC_R21
1085 lwi r20, r11, CC_R20
1086 lwi r19, r11, CC_R19
1087 /* dedicated registers */
1088 lwi r18, r11, CC_R18
1089 lwi r17, r11, CC_R17
1090 lwi r16, r11, CC_R16
1091 lwi r15, r11, CC_R15
1092 lwi r14, r11, CC_R14
1093 lwi r13, r11, CC_R13
1094 /* skip volatile registers */
1095 lwi r2, r11, CC_R2
1096 lwi r1, r11, CC_R1
1097
1098 /* special purpose registers */
1099 lwi r12, r11, CC_FSR
1100 mts rfsr, r12
1101 nop
1102 lwi r12, r11, CC_MSR
1103 mts rmsr, r12
1104 nop
1105
1106 rtsd r15, 8
1107 nop
1108
1109ENTRY(_reset)
1110 brai 0x70; /* Jump back to FS-boot */
1111
1112ENTRY(_break)
1113 mfs r5, rmsr
1114 nop
1115 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1116 mfs r5, resr
1117 nop
1118 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1119 bri 0
1120
1121 /* These are compiled and loaded into high memory, then
1122 * copied into place in mach_early_setup */
1123 .section .init.ivt, "ax"
1124 .org 0x0
1125 /* this is very important - here is the reset vector */
1126 /* in current MMU branch you don't care what is here - it is
1127 * used from bootloader site - but this is correct for FS-BOOT */
1128 brai 0x70
1129 nop
1130 brai TOPHYS(_user_exception); /* syscall handler */
1131 brai TOPHYS(_interrupt); /* Interrupt handler */
1132 brai TOPHYS(_break); /* nmi trap handler */
1133 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1134
1135 .org 0x60
1136 brai TOPHYS(_debug_exception); /* debug trap handler*/
1137
1138.section .rodata,"a"
1139#include "syscall_table.S"
1140
1141syscall_table_size=(.-sys_call_table)
1142