/*
 * File:         arch/blackfin/mach-common/entry.S
 * Based on:
 * Author:       Linus Torvalds
 *
 * Created:      ?
 * Description:  contains the system-call and fault low-level handling routines.
 *               This also contains the timer-interrupt handler, as well as all
 *               interrupts and faults that can result in a task-switch.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h>	/* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>

#include <asm/mach-common/context.S>

#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
# define EX_SCRATCH_REG RETN
#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
# define EX_SCRATCH_REG RETE
#else
# define EX_SCRATCH_REG CYCLES
#endif
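/* EX_SCRATCH_REG holds the pre-exception stack pointer while the handlers
 * below run on the dedicated exception stack; every exception exit path
 * restores SP from it just before the RTX.
 */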

#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif

/* Slightly simplified and streamlined entry point for CPLB misses.
 * This one does not lower the level to IRQ5, and thus can be used to
 * patch up CPLB misses on the kernel stack.
 */
#if ANOMALY_05000261
#define _ex_dviol _ex_workaround_261
#define _ex_dmiss _ex_workaround_261
#define _ex_dmult _ex_workaround_261

ENTRY(_ex_workaround_261)
	/*
	 * Work around an anomaly: if we see a new DCPLB fault, return
	 * without doing anything.  Then, if we get the same fault again,
	 * handle it.
	 */
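	/* Concretely: _last_cplb_fault_retx (at the end of this file) remembers
	 * the RETX of the previous CPLB fault.  A fault at a new RETX is only
	 * recorded and we return; a repeat fault at the same RETX falls through
	 * and is dispatched on its EXCAUSE below.
	 */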
	P4 = R7;	/* Store EXCAUSE */
	p5.l = _last_cplb_fault_retx;
	p5.h = _last_cplb_fault_retx;
	r7 = [p5];
	r6 = retx;
	[p5] = r6;
	cc = r6 == r7;
	if !cc jump _bfin_return_from_exception;
	/* fall through */
	R7 = P4;
	R6 = 0x26;	/* Data CPLB Miss */
	cc = R6 == R7;
	if cc jump _ex_dcplb_miss (BP);
	R6 = 0x23;	/* Data CPLB Protection Violation */
	cc = R6 == R7;
	if cc jump _ex_dcplb_viol (BP);
	/* Anything left here is 0x27 Data CPLB Multiple Hits - Linux Trap Zero */
	jump _ex_trap_c;
ENDPROC(_ex_workaround_261)

#else
#ifdef CONFIG_MPU
#define _ex_dviol _ex_dcplb_viol
#else
#define _ex_dviol _ex_trap_c
#endif
#define _ex_dmiss _ex_dcplb_miss
#define _ex_dmult _ex_trap_c
#endif


ENTRY(_ex_dcplb_viol)
ENTRY(_ex_dcplb_miss)
ENTRY(_ex_icplb_miss)
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SAVE_ALL_SYS
#ifdef CONFIG_MPU
	/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
	 * will change the stack pointer.  */
	R0 = SEQSTAT;
	R1 = SP;
#endif
	DEBUG_HWTRACE_SAVE(p5, r7)
#ifdef CONFIG_MPU
	sp += -12;
	call _cplb_hdr;
	sp += 12;
	CC = R0 == 0;
	IF !CC JUMP _handle_bad_cplb;
#else
	call __cplb_hdr;
#endif
	DEBUG_HWTRACE_RESTORE(p5, r7)
	RESTORE_ALL_SYS
	SP = EX_SCRATCH_REG;
	rtx;
ENDPROC(_ex_icplb_miss)

ENTRY(_ex_syscall)
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	raise 15;		/* invoked by TRAP #0, for sys call */
	sp = EX_SCRATCH_REG;
	rtx
ENDPROC(_ex_syscall)
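/* Nothing is handled at exception level for a syscall: we only raise IRQ15
 * (whose vector this file normally keeps pointed at _evt_system_call) and
 * return; the real work is then done by _system_call at that lower priority.
 */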

ENTRY(_ex_soft_bp)
	r7 = retx;
	r7 += -2;
	retx = r7;
	jump.s _ex_trap_c;
ENDPROC(_ex_soft_bp)

ENTRY(_ex_single_step)
	/* If we just returned from an interrupt, the single step event is
	 * for the RTI instruction.  */
	r7 = retx;
	r6 = reti;
	cc = r7 == r6;
	if cc jump _bfin_return_from_exception;

	/* Don't do single step in hardware exception handler */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	cc = bittst(r6, 5);
	if cc jump _bfin_return_from_exception;

#ifdef CONFIG_KGDB
	/* Skip the single step if the current interrupt priority is higher
	 * than that of the instruction at which gdb started single-stepping. */
	r6 >>= 6;
	r7 = 10;
.Lfind_priority_start:
	cc = bittst(r6, 0);
	if cc jump .Lfind_priority_done;
	r6 >>= 1;
	r7 += -1;
	cc = r7 == 0;
	if cc jump .Lfind_priority_done;
	jump.s .Lfind_priority_start;
.Lfind_priority_done:
	p4.l = _debugger_step;
	p4.h = _debugger_step;
	r6 = [p4];
	cc = r6 == 0;
	if cc jump .Ldo_single_step;
	r6 += -1;
	cc = r6 < r7;
	if cc jump _bfin_return_from_exception;
.Ldo_single_step:
#endif

	/* If we were in user mode, do the single step normally.  */
	r6 = [p5];
	r7 = 0xffe0 (z);
	r7 = r7 & r6;
	cc = r7 == 0;
	if cc jump 1f;

	/*
	 * We were in an interrupt handler.  By convention, all of them save
	 * SYSCFG with their first instruction, so by checking whether our
	 * RETX points at the entry point, we can determine whether to allow
	 * a single step, or whether to clear SYSCFG.
	 *
	 * First, find out the interrupt level and the event vector for it.
	 */
	p5.l = lo(EVT0);
	p5.h = hi(EVT0);
	p5 += -4;
2:
	r7 = rot r7 by -1;
	p5 += 4;
	if !cc jump 2b;

	/* What we actually do is test for the _second_ instruction in the
	 * IRQ handler.  That way, if there are insns following the restore
	 * of SYSCFG after leaving the handler, we will not turn off SYSCFG
	 * for them.  */

	r7 = [p5];
	r7 += 2;
	r6 = RETX;
	cc = R7 == R6;
	if !cc jump _bfin_return_from_exception;

1:
	/* Single stepping only a single instruction, so clear the trace
	 * bit here.  */
	r7 = syscfg;
	bitclr (r7, 0);
	syscfg = R7;

	jump _ex_trap_c;

ENDPROC(_ex_single_step)

ENTRY(_bfin_return_from_exception)
#if ANOMALY_05000257
	R7=LC0;
	LC0=R7;
	R7=LC1;
	LC1=R7;
#endif
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	sp = EX_SCRATCH_REG;
	rtx;
ENDPROC(_bfin_return_from_exception)

ENTRY(_handle_bad_cplb)
	DEBUG_HWTRACE_RESTORE(p5, r7)
	/* To get here, we just tried and failed to change a CPLB,
	 * so handle things in trap_c (C code) by lowering to IRQ5,
	 * just like we normally do.  Since this is not a "normal"
	 * return path, we have to do a lot of work on the stack to
	 * get ready, so we can fall through - we need to make a CPLB
	 * exception look like a normal exception.
	 */

	RESTORE_ALL_SYS
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);

ENTRY(_ex_replaceable)
	nop;

ENTRY(_ex_trap_c)
	/* Make sure we are not in a double fault */
	p4.l = lo(IPEND);
	p4.h = hi(IPEND);
	r7 = [p4];
	CC = BITTST (r7, 5);
	if CC jump _double_fault;

	/* Call C code (trap_c) to handle the exception, which most
	 * likely involves sending a signal to the current process.
	 * To avoid double faults, lower our priority to IRQ5 first.
	 */
	P5.h = _exception_to_level5;
	P5.l = _exception_to_level5;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = p5;
	csync;

	p4.l = lo(DCPLB_FAULT_ADDR);
	p4.h = hi(DCPLB_FAULT_ADDR);
	r7 = [p4];
	p5.h = _saved_dcplb_fault_addr;
	p5.l = _saved_dcplb_fault_addr;
	[p5] = r7;

	r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)];
	p5.h = _saved_icplb_fault_addr;
	p5.l = _saved_icplb_fault_addr;
	[p5] = r7;
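	/* The CPLB fault addresses are latched into variables here since
	 * further CPLB activity could overwrite the MMRs before trap_c,
	 * running later at IRQ5, gets a chance to look at them.
	 */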

	p4.l = _excpt_saved_stuff;
	p4.h = _excpt_saved_stuff;

	r6 = retx;
	[p4] = r6;

	r6 = SYSCFG;
	[p4 + 4] = r6;
	BITCLR(r6, 0);
	SYSCFG = r6;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.  Save the old mask.  */
	cli r6;
	[p4 + 8] = r6;

	p4.l = lo(SAFE_USER_INSTRUCTION);
	p4.h = hi(SAFE_USER_INSTRUCTION);
	retx = p4;

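	/* At this point RETX points at a safe instruction, EVT5 points at
	 * _exception_to_level5, and the real RETX/SYSCFG/IMASK are parked in
	 * _excpt_saved_stuff.  Unmask up through level 5 and raise IRQ5; the
	 * RTX lands on the safe instruction and the pending IRQ5 then takes
	 * us to _exception_to_level5 with the saved state.
	 */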
	r6 = 0x3f;
	sti r6;

	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SP = EX_SCRATCH_REG;
	raise 5;
	rtx;
ENDPROC(_ex_trap_c)

/* We just realized we got an exception, while we were processing a different
 * exception.  This is an unrecoverable event, so crash.
 */
ENTRY(_double_fault)
	/* Turn caches & protection off, to ensure we don't get any more
	 * double exceptions
	 */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];		/* Control Register */
	BITCLR(R5,ENICPLB_P);
	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;

	/* Fix up the stack */
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	SP = EX_SCRATCH_REG;

	/* We should be out of the exception stack, and back down into
	 * kernel or user space stack
	 */
	SAVE_ALL_SYS

	/* The dumping functions expect the return address in the RETI
	 * slot.  */
	r6 = retx;
	[sp + PT_PC] = r6;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	call _double_fault_c;
	SP += 12;
.L_double_fault_panic:
	JUMP .L_double_fault_panic

ENDPROC(_double_fault)

ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

	p4.l = _excpt_saved_stuff;
	p4.h = _excpt_saved_stuff;
	r6 = [p4];
	[sp + PT_PC] = r6;

	r6 = [p4 + 4];
	[sp + PT_SYSCFG] = r6;

	/* Restore interrupt mask.  We haven't pushed RETI, so this
	 * doesn't enable interrupts until we return from this handler.  */
	r6 = [p4 + 8];
	sti r6;

	/* Restore the hardware error vector.  */
	P5.h = _evt_ivhw;
	P5.l = _evt_ivhw;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = p5;
	csync;

	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];		/* Read current IPEND */
	[sp + PT_IPEND] = r0;	/* Store IPEND */

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	call _trap_c;
	SP += 12;

	call _ret_from_exception;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_exception_to_level5)

ENTRY(_trap) /* Exception: 4th entry into system event table (supervisor mode) */
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB.  Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
	EX_SCRATCH_REG = sp;
	sp.l = _exception_stack_top;
	sp.h = _exception_stack_top;
	/* Try to deal with syscalls quickly.  */
	[--sp] = ASTAT;
	[--sp] = (R7:6,P5:4);
	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;
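	/* Dispatch on the EXCAUSE value: each of the 64 possible codes indexes
	 * one handler pointer in the _ex_table array defined near the end of
	 * this file.
	 */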
	p5.h = _ex_table;
	p5.l = _ex_table;
	p4 = r7;
	p5 = p5 + (p4 << 2);
	p4 = [p5];
	jump (p4);

.Lbadsys:
	r7 = -ENOSYS;		/* sign-extending is enough */
	[sp + PT_R0] = r7;	/* return value from system call */
	jump .Lsyscall_really_exit;
ENDPROC(_trap)

ENTRY(_kernel_execve)
	link SIZEOF_PTREGS;
	p0 = sp;
	r3 = SIZEOF_PTREGS / 4;
	r4 = 0(x);
0:
	[p0++] = r4;
	r3 += -1;
	cc = r3 == 0;
	if !cc jump 0b (bp);

	p0 = sp;
	sp += -16;
	[sp + 12] = p0;
	call _do_execve;
	SP += 16;
	cc = r0 == 0;
	if ! cc jump 1f;
	/* Success.  Copy our temporary pt_regs to the top of the kernel
	 * stack and do a normal exception return.
	 */
	r1 = sp;
	r0 = (-KERNEL_STACK_SIZE) (x);
	r1 = r1 & r0;
	p2 = r1;
	p3 = [p2];
	r0 = KERNEL_STACK_SIZE - 4 (z);
	p1 = r0;
	p1 = p1 + p2;

	p0 = fp;
	r4 = [p0--];
	r3 = SIZEOF_PTREGS / 4;
0:
	r4 = [p0--];
	[p1--] = r4;
	r3 += -1;
	cc = r3 == 0;
	if ! cc jump 0b (bp);

	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
	p1 = r0;
	p1 = p1 + p2;
	sp = p1;
	r0 = syscfg;
	[SP + PT_SYSCFG] = r0;
	[p3 + (TASK_THREAD + THREAD_KSP)] = sp;

	RESTORE_CONTEXT;
	rti;
1:
	unlink;
	rts;
ENDPROC(_kernel_execve)

ENTRY(_system_call)
	/* Store IPEND */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* Store RETS for now */
	r0 = rets;
	[sp + PT_RESERVED] = r0;
	/* Set the stack for the current process */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;		/* thread_info */
	p2 = r7;
	p2 = [p2];

	[p2+(TASK_THREAD+THREAD_KSP)] = sp;

	/* Check the System Call */
	r7 = __NR_syscall;
	/* System call number is passed in P0 */
	r6 = p0;
	cc = r6 < r7;
	if ! cc jump .Lbadsys;

	/* are we tracing syscalls? */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;
	p2 = r7;
	r7 = [p2+TI_FLAGS];
	CC = BITTST(r7,TIF_SYSCALL_TRACE);
	if CC JUMP _sys_trace;

	/* Execute the appropriate system call */

	p4 = p0;
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	p5 = [p5];
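	/* The first three syscall arguments travel in R0-R2 (reloaded from
	 * pt_regs above); R3-R5 are passed on the stack, pushed below the
	 * 12 bytes of outgoing argument space reserved before every C call
	 * in this file - hence the SP += 24 after the call returns.
	 */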

	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

.Lresume_userspace:
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
.Lresume_userspace_1:
	/* Disable interrupts.  */
	[--sp] = reti;
	reti = [sp++];
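	/* Saving RETI to the stack re-enables interrupt nesting; reloading it
	 * disables nesting again, so this push/pop pair acts as a compact
	 * interrupt disable (the push/pop-into-R0 pairs below do the reverse).
	 */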

	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 = r7 & r4;

.Lsyscall_resched:
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lsyscall_sigpending;

	/* Reenable interrupts.  */
	[--sp] = reti;
	r0 = [sp++];

	SP += -12;
	call _schedule;
	SP += 12;

	jump .Lresume_userspace_1;

.Lsyscall_sigpending:
	cc = BITTST(r7, TIF_RESTORE_SIGMASK);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_SIGPENDING);
	if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
	/* Reenable interrupts.  */
	[--sp] = reti;
	r0 = [sp++];

	r0 = sp;
	SP += -12;
	call _do_signal;
	SP += 12;

.Lsyscall_really_exit:
	r5 = [sp + PT_RESERVED];
	rets = r5;
	rts;
ENDPROC(_system_call)

_sys_trace:
	call _syscall_trace;

	/* Execute the appropriate system call */

	p4 = [SP + PT_P0];
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	r5 = [sp + PT_R5];
	p5 = [p5];

	[--sp] = r5;
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

	call _syscall_trace;
	jump .Lresume_userspace;
ENDPROC(_sys_trace)

ENTRY(_resume)
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in r0, next (the new task) is in r1.
	 */
	p0 = r0;
	p1 = r1;
	[--sp] = rets;
	[--sp] = fp;
	[--sp] = (r7:4, p5:3);

	/* save usp */
	p2 = usp;
	[p0+(TASK_THREAD+THREAD_USP)] = p2;

	/* save current kernel stack pointer */
	[p0+(TASK_THREAD+THREAD_KSP)] = sp;

	/* save program counter */
	r1.l = _new_old_task;
	r1.h = _new_old_task;
	[p0+(TASK_THREAD+THREAD_PC)] = r1;

	/* restore the kernel stack pointer */
	sp = [p1+(TASK_THREAD+THREAD_KSP)];

	/* restore user stack pointer */
	p0 = [p1+(TASK_THREAD+THREAD_USP)];
	usp = p0;

	/* restore pc */
	p0 = [p1+(TASK_THREAD+THREAD_PC)];
	jump (p0);

	/*
	 * The following code actually lands in the new (old) task.
	 */

_new_old_task:
	(r7:4, p5:3) = [sp++];
	fp = [sp++];
	rets = [sp++];

	/*
	 * When we come out of resume, r0 carries the "old" task, because we
	 * are now in the "new" task.
	 */
	rts;
ENDPROC(_resume)

ENTRY(_ret_from_exception)
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);

	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

1:
	r2 = LO(~0x37) (Z);
	r0 = r2 & r0;
	cc = r0 == 0;
	if !cc jump 4f;		/* if not return to user mode, get out */

	/* Make sure any pending system call or deferred exception left in
	 * ILAT for this process gets executed; otherwise, if a context
	 * switch happens, the system call of the first process (the one
	 * still in ILAT) would be carried forward to the switched-in process.
	 */

	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	r1 = (EVT_IVG14 | EVT_IVG15) (z);
	r0 = r0 & r1;
	cc = r0 == 0;
	if !cc jump 5f;

	/* Set the stack for the current process */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 = r7 & r4;
	cc = r7 == 0;
	if cc jump 4f;

	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal;
	p1.h = _schedule_and_signal;
	[p0] = p1;
	csync;
	raise 15;		/* raise evt15 to do signal or reschedule */
4:
	r0 = syscfg;
	bitclr(r0, 0);
	syscfg = r0;
5:
	rts;
ENDPROC(_ret_from_exception)

ENTRY(_return_from_int)
	/* If someone else already raised IRQ 15, do nothing.  */
	csync;
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	cc = bittst (r0, EVT_IVG15_P);
	if cc jump 2f;

	/* if not return to user mode, get out */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	r1 = 0x17(Z);
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;
	r1 = 1;
	r1 = r0 - r1;
	r2 = r0 & r1;
	cc = r2 == 0;
	if !cc jump 2f;

	/* Lower the interrupt level to 15.  */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal_from_int;
	p1.h = _schedule_and_signal_from_int;
	[p0] = p1;
	csync;
#if ANOMALY_05000281
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif
	r0 = 0x801f (z);
	STI r0;
	raise 15;	/* raise evt15 to do signal or reschedule */
	rti;
2:
	rts;
ENDPROC(_return_from_int)

ENTRY(_lower_to_irq14)
#if ANOMALY_05000281
	r0.l = lo(SAFE_USER_INSTRUCTION);
	r0.h = hi(SAFE_USER_INSTRUCTION);
	reti = r0;
#endif
	r0 = 0x401f;
	sti r0;
	raise 14;
	rti;
ENTRY(_evt14_softirq)
#ifdef CONFIG_DEBUG_HWERR
	r0 = 0x3f;
	sti r0;
#else
	cli r0;
#endif
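	/* Pushing RETI re-enables interrupt nesting at this level; the saved
	 * copy is immediately discarded since only that side effect is wanted.
	 */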
	[--sp] = RETI;
	SP += 4;
	rts;

_schedule_and_signal_from_int:
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;

	/* Set orig_p0 to -1 to indicate this isn't the end of a syscall.  */
	r0 = -1 (x);
	[sp + PT_ORIG_P0] = r0;

	p1 = rets;
	[sp + PT_RESERVED] = p1;

	p0.l = _irq_flags;
	p0.h = _irq_flags;
	r0 = [p0];
	sti r0;

	r0 = sp;
	sp += -12;
	call _finish_atomic_sections;
	sp += 12;
	jump.s .Lresume_userspace;

_schedule_and_signal:
	SAVE_CONTEXT_SYSCALL
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	p0.l = 1f;
	p0.h = 1f;
	[sp + PT_RESERVED] = P0;
	call .Lresume_userspace;
1:
	RESTORE_CONTEXT
	rti;
ENDPROC(_lower_to_irq14)

/* We handle this 100% in exception space - to reduce overhead.
 * The only potential problem is if the software buffer gets swapped out of
 * the CPLB table - then we double fault - so we don't let that happen in
 * other places.
 */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
ENTRY(_ex_trace_buff_full)
	[--sp] = P3;
	[--sp] = P2;
	[--sp] = LC0;
	[--sp] = LT0;
	[--sp] = LB0;
	P5.L = _trace_buff_offset;
	P5.H = _trace_buff_offset;
	P3 = [P5];		/* trace_buff_offset */
	P5.L = lo(TBUFSTAT);
	P5.H = hi(TBUFSTAT);
	R7 = [P5];
	R7 <<= 1;		/* double, since we need to read twice */
	LC0 = R7;
	R7 <<= 2;		/* need to shift over again,
				 * to get the number of bytes */
	P5.L = lo(TBUF);
	P5.H = hi(TBUF);
	R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;
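	/* R6 is the byte-offset wrap mask for the software buffer below:
	 * _software_trace_buff holds (1 << EXPAND_LEN) * 256 longs, i.e.
	 * (1 << EXPAND_LEN) KiB, so offsets wrap at that size.
	 */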

	P2 = R7;
	P3 = P3 + P2;
	R7 = P3;
	R7 = R7 & R6;
	P3 = R7;
	P2.L = _trace_buff_offset;
	P2.H = _trace_buff_offset;
	[P2] = P3;

	P2.L = _software_trace_buff;
	P2.H = _software_trace_buff;

	LSETUP (.Lstart, .Lend) LC0;
.Lstart:
	R7 = [P5];	/* read TBUF */
	P4 = P3 + P2;
	[P4] = R7;
	P3 += -4;
	R7 = P3;
	R7 = R7 & R6;
.Lend:
	P3 = R7;

	LB0 = [sp++];
	LT0 = [sp++];
	LC0 = [sp++];
	P2 = [sp++];
	P3 = [sp++];
	jump _bfin_return_from_exception;
ENDPROC(_ex_trace_buff_full)

#if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
.data
#else
.section .l1.data.B
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
ENTRY(_trace_buff_offset)
	.long 0;
	ALIGN
ENTRY(_software_trace_buff)
	.rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
	.long 0
	.endr
#endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */


#if CONFIG_EARLY_PRINTK
__INIT
ENTRY(_early_trap)
	SAVE_ALL_SYS
	trace_buffer_stop(p0,r0);

	/* Turn caches off, to ensure we don't get double exceptions */

	P4.L = LO(IMEM_CONTROL);
	P4.H = HI(IMEM_CONTROL);

	R5 = [P4];		/* Control Register */
	BITCLR(R5,ENICPLB_P);
	CLI R1;
	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;

	P4.L = LO(DMEM_CONTROL);
	P4.H = HI(DMEM_CONTROL);
	R5 = [P4];
	BITCLR(R5,ENDCPLB_P);
	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
	.align 8;
	[P4] = R5;
	SSYNC;
	STI R1;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	r1 = RETX;

	SP += -12;
	call _early_trap_c;
	SP += 12;
ENDPROC(_early_trap)
__FINIT
#endif /* CONFIG_EARLY_PRINTK */

/*
 * Put these in the kernel data section - that should always be covered by
 * a CPLB.  This is needed to ensure we don't get double fault conditions.
 */

#ifdef CONFIG_SYSCALL_TAB_L1
.section .l1.data
#else
.data
#endif

ENTRY(_ex_table)
	/* entry for each EXCAUSE[5:0]
	 * This table must be in sync with the table in ./kernel/traps.c
	 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
	 */
	.long _ex_syscall	/* 0x00 - User Defined - Linux Syscall */
	.long _ex_soft_bp	/* 0x01 - User Defined - Software breakpoint */
	.long _ex_replaceable	/* 0x02 - User Defined */
	.long _ex_trap_c	/* 0x03 - User Defined - userspace stack overflow */
	.long _ex_trap_c	/* 0x04 - User Defined - dump trace buffer */
	.long _ex_replaceable	/* 0x05 - User Defined */
	.long _ex_replaceable	/* 0x06 - User Defined */
	.long _ex_replaceable	/* 0x07 - User Defined */
	.long _ex_replaceable	/* 0x08 - User Defined */
	.long _ex_replaceable	/* 0x09 - User Defined */
	.long _ex_replaceable	/* 0x0A - User Defined */
	.long _ex_replaceable	/* 0x0B - User Defined */
	.long _ex_replaceable	/* 0x0C - User Defined */
	.long _ex_replaceable	/* 0x0D - User Defined */
	.long _ex_replaceable	/* 0x0E - User Defined */
	.long _ex_replaceable	/* 0x0F - User Defined */
	.long _ex_single_step	/* 0x10 - HW Single step */
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	.long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */
#else
	.long _ex_trap_c	/* 0x11 - Trace Buffer Full */
#endif
	.long _ex_trap_c	/* 0x12 - Reserved */
	.long _ex_trap_c	/* 0x13 - Reserved */
	.long _ex_trap_c	/* 0x14 - Reserved */
	.long _ex_trap_c	/* 0x15 - Reserved */
	.long _ex_trap_c	/* 0x16 - Reserved */
	.long _ex_trap_c	/* 0x17 - Reserved */
	.long _ex_trap_c	/* 0x18 - Reserved */
	.long _ex_trap_c	/* 0x19 - Reserved */
	.long _ex_trap_c	/* 0x1A - Reserved */
	.long _ex_trap_c	/* 0x1B - Reserved */
	.long _ex_trap_c	/* 0x1C - Reserved */
	.long _ex_trap_c	/* 0x1D - Reserved */
	.long _ex_trap_c	/* 0x1E - Reserved */
	.long _ex_trap_c	/* 0x1F - Reserved */
	.long _ex_trap_c	/* 0x20 - Reserved */
	.long _ex_trap_c	/* 0x21 - Undefined Instruction */
	.long _ex_trap_c	/* 0x22 - Illegal Instruction Combination */
	.long _ex_dviol		/* 0x23 - Data CPLB Protection Violation */
	.long _ex_trap_c	/* 0x24 - Data access misaligned */
	.long _ex_trap_c	/* 0x25 - Unrecoverable Event */
	.long _ex_dmiss		/* 0x26 - Data CPLB Miss */
	.long _ex_dmult		/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
	.long _ex_trap_c	/* 0x28 - Emulation Watchpoint */
	.long _ex_trap_c	/* 0x29 - Instruction fetch access error (535 only) */
	.long _ex_trap_c	/* 0x2A - Instruction fetch misaligned */
	.long _ex_trap_c	/* 0x2B - Instruction CPLB protection Violation */
	.long _ex_icplb_miss	/* 0x2C - Instruction CPLB miss */
	.long _ex_trap_c	/* 0x2D - Instruction CPLB Multiple Hits */
	.long _ex_trap_c	/* 0x2E - Illegal use of Supervisor Resource */
	.long _ex_trap_c	/* 0x2E - Illegal use of Supervisor Resource */
	.long _ex_trap_c	/* 0x2F - Reserved */
	.long _ex_trap_c	/* 0x30 - Reserved */
	.long _ex_trap_c	/* 0x31 - Reserved */
	.long _ex_trap_c	/* 0x32 - Reserved */
	.long _ex_trap_c	/* 0x33 - Reserved */
	.long _ex_trap_c	/* 0x34 - Reserved */
	.long _ex_trap_c	/* 0x35 - Reserved */
	.long _ex_trap_c	/* 0x36 - Reserved */
	.long _ex_trap_c	/* 0x37 - Reserved */
	.long _ex_trap_c	/* 0x38 - Reserved */
	.long _ex_trap_c	/* 0x39 - Reserved */
	.long _ex_trap_c	/* 0x3A - Reserved */
	.long _ex_trap_c	/* 0x3B - Reserved */
	.long _ex_trap_c	/* 0x3C - Reserved */
	.long _ex_trap_c	/* 0x3D - Reserved */
	.long _ex_trap_c	/* 0x3E - Reserved */
	.long _ex_trap_c	/* 0x3F - Reserved */
END(_ex_table)

ENTRY(_sys_call_table)
	.long _sys_restart_syscall	/* 0 */
	.long _sys_exit
	.long _sys_fork
	.long _sys_read
	.long _sys_write
	.long _sys_open			/* 5 */
	.long _sys_close
	.long _sys_ni_syscall		/* old waitpid */
	.long _sys_creat
	.long _sys_link
	.long _sys_unlink		/* 10 */
	.long _sys_execve
	.long _sys_chdir
	.long _sys_time
	.long _sys_mknod
	.long _sys_chmod		/* 15 */
	.long _sys_chown		/* chown16 */
	.long _sys_ni_syscall		/* old break syscall holder */
	.long _sys_ni_syscall		/* old stat */
	.long _sys_lseek
	.long _sys_getpid		/* 20 */
	.long _sys_mount
	.long _sys_ni_syscall		/* old umount */
	.long _sys_setuid
	.long _sys_getuid
	.long _sys_stime		/* 25 */
	.long _sys_ptrace
	.long _sys_alarm
	.long _sys_ni_syscall		/* old fstat */
	.long _sys_pause
	.long _sys_ni_syscall		/* old utime */ /* 30 */
	.long _sys_ni_syscall		/* old stty syscall holder */
	.long _sys_ni_syscall		/* old gtty syscall holder */
	.long _sys_access
	.long _sys_nice
	.long _sys_ni_syscall		/* 35 */ /* old ftime syscall holder */
	.long _sys_sync
	.long _sys_kill
	.long _sys_rename
	.long _sys_mkdir
	.long _sys_rmdir		/* 40 */
	.long _sys_dup
	.long _sys_pipe
	.long _sys_times
	.long _sys_ni_syscall		/* old prof syscall holder */
	.long _sys_brk			/* 45 */
	.long _sys_setgid
	.long _sys_getgid
	.long _sys_ni_syscall		/* old sys_signal */
	.long _sys_geteuid		/* geteuid16 */
	.long _sys_getegid		/* getegid16 */ /* 50 */
	.long _sys_acct
	.long _sys_umount		/* recycled never used phys() */
	.long _sys_ni_syscall		/* old lock syscall holder */
	.long _sys_ioctl
	.long _sys_fcntl		/* 55 */
	.long _sys_ni_syscall		/* old mpx syscall holder */
	.long _sys_setpgid
	.long _sys_ni_syscall		/* old ulimit syscall holder */
	.long _sys_ni_syscall		/* old old uname */
	.long _sys_umask		/* 60 */
	.long _sys_chroot
	.long _sys_ustat
	.long _sys_dup2
	.long _sys_getppid
	.long _sys_getpgrp		/* 65 */
	.long _sys_setsid
	.long _sys_ni_syscall		/* old sys_sigaction */
	.long _sys_sgetmask
	.long _sys_ssetmask
	.long _sys_setreuid		/* setreuid16 */ /* 70 */
	.long _sys_setregid		/* setregid16 */
	.long _sys_ni_syscall		/* old sys_sigsuspend */
	.long _sys_ni_syscall		/* old sys_sigpending */
	.long _sys_sethostname
	.long _sys_setrlimit		/* 75 */
	.long _sys_ni_syscall		/* old getrlimit */
	.long _sys_getrusage
	.long _sys_gettimeofday
	.long _sys_settimeofday
	.long _sys_getgroups		/* getgroups16 */ /* 80 */
	.long _sys_setgroups		/* setgroups16 */
	.long _sys_ni_syscall		/* old_select */
	.long _sys_symlink
	.long _sys_ni_syscall		/* old lstat */
	.long _sys_readlink		/* 85 */
	.long _sys_uselib
	.long _sys_ni_syscall		/* sys_swapon */
	.long _sys_reboot
	.long _sys_ni_syscall		/* old_readdir */
	.long _sys_ni_syscall		/* sys_mmap */ /* 90 */
	.long _sys_munmap
	.long _sys_truncate
	.long _sys_ftruncate
	.long _sys_fchmod
	.long _sys_fchown		/* fchown16 */ /* 95 */
	.long _sys_getpriority
	.long _sys_setpriority
	.long _sys_ni_syscall		/* old profil syscall holder */
	.long _sys_statfs
	.long _sys_fstatfs		/* 100 */
	.long _sys_ni_syscall
	.long _sys_ni_syscall		/* old sys_socketcall */
	.long _sys_syslog
	.long _sys_setitimer
	.long _sys_getitimer		/* 105 */
	.long _sys_newstat
	.long _sys_newlstat
	.long _sys_newfstat
	.long _sys_ni_syscall		/* old uname */
	.long _sys_ni_syscall		/* iopl for i386 */ /* 110 */
	.long _sys_vhangup
	.long _sys_ni_syscall		/* obsolete idle() syscall */
	.long _sys_ni_syscall		/* vm86old for i386 */
	.long _sys_wait4
	.long _sys_ni_syscall		/* 115 */ /* sys_swapoff */
	.long _sys_sysinfo
	.long _sys_ni_syscall		/* old sys_ipc */
	.long _sys_fsync
	.long _sys_ni_syscall		/* old sys_sigreturn */
	.long _sys_clone		/* 120 */
	.long _sys_setdomainname
	.long _sys_newuname
	.long _sys_ni_syscall		/* old sys_modify_ldt */
	.long _sys_adjtimex
	.long _sys_ni_syscall		/* 125 */ /* sys_mprotect */
	.long _sys_ni_syscall		/* old sys_sigprocmask */
	.long _sys_ni_syscall		/* old "creat_module" */
	.long _sys_init_module
	.long _sys_delete_module
	.long _sys_ni_syscall		/* 130: old "get_kernel_syms" */
	.long _sys_quotactl
	.long _sys_getpgid
	.long _sys_fchdir
	.long _sys_bdflush
	.long _sys_ni_syscall		/* 135 */ /* sys_sysfs */
	.long _sys_personality
	.long _sys_ni_syscall		/* for afs_syscall */
	.long _sys_setfsuid		/* setfsuid16 */
	.long _sys_setfsgid		/* setfsgid16 */
	.long _sys_llseek		/* 140 */
	.long _sys_getdents
	.long _sys_ni_syscall		/* sys_select */
	.long _sys_flock
	.long _sys_ni_syscall		/* sys_msync */
	.long _sys_readv		/* 145 */
	.long _sys_writev
	.long _sys_getsid
	.long _sys_fdatasync
	.long _sys_sysctl
	.long _sys_ni_syscall		/* 150 */ /* sys_mlock */
	.long _sys_ni_syscall		/* sys_munlock */
	.long _sys_ni_syscall		/* sys_mlockall */
	.long _sys_ni_syscall		/* sys_munlockall */
	.long _sys_sched_setparam
	.long _sys_sched_getparam	/* 155 */
	.long _sys_sched_setscheduler
	.long _sys_sched_getscheduler
	.long _sys_sched_yield
	.long _sys_sched_get_priority_max
	.long _sys_sched_get_priority_min  /* 160 */
	.long _sys_sched_rr_get_interval
	.long _sys_nanosleep
	.long _sys_mremap
	.long _sys_setresuid		/* setresuid16 */
	.long _sys_getresuid		/* getresuid16 */ /* 165 */
	.long _sys_ni_syscall		/* for vm86 */
	.long _sys_ni_syscall		/* old "query_module" */
	.long _sys_ni_syscall		/* sys_poll */
	.long _sys_nfsservctl
	.long _sys_setresgid		/* setresgid16 */ /* 170 */
	.long _sys_getresgid		/* getresgid16 */
	.long _sys_prctl
	.long _sys_rt_sigreturn
	.long _sys_rt_sigaction
	.long _sys_rt_sigprocmask	/* 175 */
	.long _sys_rt_sigpending
	.long _sys_rt_sigtimedwait
	.long _sys_rt_sigqueueinfo
	.long _sys_rt_sigsuspend
	.long _sys_pread64		/* 180 */
	.long _sys_pwrite64
	.long _sys_lchown		/* lchown16 */
	.long _sys_getcwd
	.long _sys_capget
	.long _sys_capset		/* 185 */
	.long _sys_sigaltstack
	.long _sys_sendfile
	.long _sys_ni_syscall		/* streams1 */
	.long _sys_ni_syscall		/* streams2 */
	.long _sys_vfork		/* 190 */
	.long _sys_getrlimit
	.long _sys_mmap2
	.long _sys_truncate64
	.long _sys_ftruncate64
	.long _sys_stat64		/* 195 */
	.long _sys_lstat64
	.long _sys_fstat64
	.long _sys_chown
	.long _sys_getuid
	.long _sys_getgid		/* 200 */
	.long _sys_geteuid
	.long _sys_getegid
	.long _sys_setreuid
	.long _sys_setregid
	.long _sys_getgroups		/* 205 */
	.long _sys_setgroups
	.long _sys_fchown
	.long _sys_setresuid
	.long _sys_getresuid
	.long _sys_setresgid		/* 210 */
	.long _sys_getresgid
	.long _sys_lchown
	.long _sys_setuid
	.long _sys_setgid
	.long _sys_setfsuid		/* 215 */
	.long _sys_setfsgid
	.long _sys_pivot_root
	.long _sys_ni_syscall		/* sys_mincore */
	.long _sys_ni_syscall		/* sys_madvise */
	.long _sys_getdents64		/* 220 */
	.long _sys_fcntl64
	.long _sys_ni_syscall		/* reserved for TUX */
	.long _sys_ni_syscall
	.long _sys_gettid
	.long _sys_readahead		/* 225 */
	.long _sys_setxattr
	.long _sys_lsetxattr
	.long _sys_fsetxattr
	.long _sys_getxattr
	.long _sys_lgetxattr		/* 230 */
	.long _sys_fgetxattr
	.long _sys_listxattr
	.long _sys_llistxattr
	.long _sys_flistxattr
	.long _sys_removexattr		/* 235 */
	.long _sys_lremovexattr
	.long _sys_fremovexattr
	.long _sys_tkill
	.long _sys_sendfile64
	.long _sys_futex		/* 240 */
	.long _sys_sched_setaffinity
	.long _sys_sched_getaffinity
	.long _sys_ni_syscall		/* sys_set_thread_area */
	.long _sys_ni_syscall		/* sys_get_thread_area */
	.long _sys_io_setup		/* 245 */
	.long _sys_io_destroy
	.long _sys_io_getevents
	.long _sys_io_submit
	.long _sys_io_cancel
	.long _sys_ni_syscall		/* 250 */ /* sys_alloc_hugepages */
	.long _sys_ni_syscall		/* sys_freec_hugepages */
	.long _sys_exit_group
	.long _sys_lookup_dcookie
	.long _sys_bfin_spinlock
	.long _sys_epoll_create		/* 255 */
	.long _sys_epoll_ctl
	.long _sys_epoll_wait
	.long _sys_ni_syscall		/* remap_file_pages */
	.long _sys_set_tid_address
	.long _sys_timer_create		/* 260 */
	.long _sys_timer_settime
	.long _sys_timer_gettime
	.long _sys_timer_getoverrun
	.long _sys_timer_delete
	.long _sys_clock_settime	/* 265 */
	.long _sys_clock_gettime
	.long _sys_clock_getres
	.long _sys_clock_nanosleep
	.long _sys_statfs64
	.long _sys_fstatfs64		/* 270 */
	.long _sys_tgkill
	.long _sys_utimes
	.long _sys_fadvise64_64
	.long _sys_ni_syscall		/* vserver */
	.long _sys_ni_syscall		/* 275, mbind */
	.long _sys_ni_syscall		/* get_mempolicy */
	.long _sys_ni_syscall		/* set_mempolicy */
	.long _sys_mq_open
	.long _sys_mq_unlink
	.long _sys_mq_timedsend		/* 280 */
	.long _sys_mq_timedreceive
	.long _sys_mq_notify
	.long _sys_mq_getsetattr
	.long _sys_ni_syscall		/* kexec_load */
	.long _sys_waitid		/* 285 */
	.long _sys_add_key
	.long _sys_request_key
	.long _sys_keyctl
	.long _sys_ioprio_set
	.long _sys_ioprio_get		/* 290 */
	.long _sys_inotify_init
	.long _sys_inotify_add_watch
	.long _sys_inotify_rm_watch
	.long _sys_ni_syscall		/* migrate_pages */
	.long _sys_openat		/* 295 */
	.long _sys_mkdirat
	.long _sys_mknodat
	.long _sys_fchownat
	.long _sys_futimesat
	.long _sys_fstatat64		/* 300 */
	.long _sys_unlinkat
	.long _sys_renameat
	.long _sys_linkat
	.long _sys_symlinkat
	.long _sys_readlinkat		/* 305 */
	.long _sys_fchmodat
	.long _sys_faccessat
	.long _sys_pselect6
	.long _sys_ppoll
	.long _sys_unshare		/* 310 */
	.long _sys_sram_alloc
	.long _sys_sram_free
	.long _sys_dma_memcpy
	.long _sys_accept
	.long _sys_bind			/* 315 */
	.long _sys_connect
	.long _sys_getpeername
	.long _sys_getsockname
	.long _sys_getsockopt
	.long _sys_listen		/* 320 */
	.long _sys_recv
	.long _sys_recvfrom
	.long _sys_recvmsg
	.long _sys_send
	.long _sys_sendmsg		/* 325 */
	.long _sys_sendto
	.long _sys_setsockopt
	.long _sys_shutdown
	.long _sys_socket
	.long _sys_socketpair		/* 330 */
	.long _sys_semctl
	.long _sys_semget
	.long _sys_semop
	.long _sys_msgctl
	.long _sys_msgget		/* 335 */
	.long _sys_msgrcv
	.long _sys_msgsnd
	.long _sys_shmat
	.long _sys_shmctl
	.long _sys_shmdt		/* 340 */
	.long _sys_shmget
	.long _sys_splice
	.long _sys_sync_file_range
	.long _sys_tee
	.long _sys_vmsplice		/* 345 */
	.long _sys_epoll_pwait
	.long _sys_utimensat
	.long _sys_signalfd
	.long _sys_timerfd_create
	.long _sys_eventfd		/* 350 */
	.long _sys_pread64
	.long _sys_pwrite64
	.long _sys_fadvise64
	.long _sys_set_robust_list
	.long _sys_get_robust_list	/* 355 */
	.long _sys_fallocate
	.long _sys_semtimedop
	.long _sys_timerfd_settime
	.long _sys_timerfd_gettime

	.rept NR_syscalls-(.-_sys_call_table)/4
	.long _sys_ni_syscall
	.endr

/*
 * Used to save the real RETX, IMASK and SYSCFG when temporarily
 * storing safe values across the transition from exception to IRQ5.
 */
_excpt_saved_stuff:
	.long 0;
	.long 0;
	.long 0;

_exception_stack:
	.rept 1024
	.long 0;
	.endr
_exception_stack_top:

#if ANOMALY_05000261
/* Used by the assembly entry point to work around an anomaly.  */
_last_cplb_fault_retx:
	.long 0;
#endif