/* $Id: traps.c,v 1.17 2004/05/02 01:46:30 sugioka Exp $
 *
 * linux/arch/sh/traps.c
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                 Copyright (C) 2000 Philipp Rumpf
 *                 Copyright (C) 2000 David Howells
 *                 Copyright (C) 2002, 2003 Paul Mundt
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/sections.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if (kgdb_debug_hook && !user_mode(regs))\
		(*kgdb_debug_hook)(regs);	\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

#ifdef CONFIG_CPU_SH2
#define TRAP_RESERVED_INST	4
#define TRAP_ILLEGAL_SLOT_INST	6
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

/*
 * These constants are for searching for possible module text
 * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define MODULE_RANGE	(8*1024*1024)

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	CHK_REMOTE_DEBUG(regs);
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static int handle_unaligned_notify_count = 10;

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return 0;
		}
		die(str, regs, err);
	}
	return -EFAULT;
}

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on a fault that could not be handled
 */
static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

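	/*
	 * For the mov.[bwl] opcodes handled below, the low two bits of
	 * the instruction encode the operand size (0 = byte, 1 = word,
	 * 2 = long), so this yields the access width in bytes: 1, 2 or 4.
	 */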
	count = 1<<(instruction&3);

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			src = (unsigned char*) *rm;
			src += regs->regs[0];
			dst = (unsigned char*) rn;
			*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
			if (copy_from_user(dst, src, count))
				goto fetch_fault;

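			/*
			 * mov.w sign-extends: if bit 15 of the loaded word
			 * is set, fill the upper bytes of the destination
			 * register with 0xff.
			 */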
			if ((count == 2) && dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			dst += 4-count;

			if (__copy_user(dst, src, count))
				goto fetch_fault;

			if ((count == 2) && dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
		} else {
			/* to memory */
			src = (unsigned char*) rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dst = (unsigned char*) *rn;
			dst += regs->regs[0];

			if (copy_to_user(dst, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
		dst += (instruction&0x000F)<<2;

		if (copy_to_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (copy_to_user(dst, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		src = (unsigned char*) *rm;
		src += (instruction&0x000F)<<2;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

		if (copy_from_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6: /* mov.[bwl] from memory, possibly with post-increment */
		src = (unsigned char*) *rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[1] & 0x80) {
			dst[2] = 0xff;
			dst[3] = 0xff;
		}
#else
		dst += 4-count;

		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[2] & 0x80) {
			dst[0] = 0xff;
			dst[1] = 0xff;
		}
#endif
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char*) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dst = (unsigned char*) *rm; /* called Rn in the spec */
			dst += (instruction&0x000F)<<1;

			if (copy_to_user(dst, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			src = (unsigned char*) *rm;
			src += (instruction&0x000F)<<1;
			dst = (unsigned char*) &regs->regs[0];
			*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif

			if (copy_from_user(dst, src, 2))
				goto fetch_fault;

#ifdef __LITTLE_ENDIAN__
			if (dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			if (dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
	u16 instruction;

	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
	}

	return handle_unaligned_ins(instruction,regs);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *   SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *   SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
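/*
 * In SH_PC_12BIT_OFFSET, (instr<<4) shifts the 12-bit displacement to
 * the top of a signed short, and the arithmetic >>3 then sign-extends
 * it while multiplying it by 2 (instructions are 2 bytes wide); the +4
 * is the PC-relative base of PC+4.
 */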

static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot */
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
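				/*
				 * SR bit 0 is the T (condition) flag: bf/s
				 * branches only when T is clear, so when T
				 * is set just step past the delay slot.
				 */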
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}

/*
 * Handle various address error exceptions
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code;
	mm_segment_t oldfs;
	u16 instruction;
	int tmp;

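	/* entry.S is assumed to have left the error code in the banked r2 register */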
	asm volatile("stc r2_bank,%0": "=r" (error_code));

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
		current->thread.trap_no = (writeaccess) ? 8 : 7;

		/* bad PC is not something we can fix */
		if (regs->pc & 1)
			goto uspace_segv;

		set_fs(USER_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen on a non-SMP system. */
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

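		/*
		 * The faulting instruction lives in kernel space; widen the
		 * address limit so copy_from_user() can fetch it.
		 */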
		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen on a non-SMP system. */
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		handle_unaligned_access(instruction, regs);
		set_fs(oldfs);
	}
}

#ifdef CONFIG_SH_DSP
/*
 * SH-DSP support by gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst;

	/*
	 * Safeguard against DSP mode already being enabled, or the CPU
	 * lacking a DSP altogether.
	 */
	if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

extern int do_fpu_inst(unsigned short, struct pt_regs *);

asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst;
	int err;

	get_user(inst, (unsigned short*)regs.pc);

	err = do_fpu_inst(inst, &regs);
	if (!err) {
		regs.pc += 2;
		return;
	}
	/* not an FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(&regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs.sr |= SR_DSP;
		return;
	}
#endif

	asm volatile("stc r2_bank, %0": "=r" (error_code));
	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("reserved instruction", &regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs* regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn+4;
	 * bsrf:0x03: PC+=Rn+4   after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
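	/*
	 * Each mask below matches a pair of encodings that differ in a
	 * single bit: 0xfd00/0x8d00 catches bt/s (8dxx) and bf/s (8fxx),
	 * 0xf0df/0x0003 catches bsrf (0x03) and braf (0x23), and
	 * 0xf0df/0x400b catches jsr (4x0b) and jmp (4x2b).
	 */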
608 if ((inst & 0xfd00) == 0x8d00) {
609 regs->pc += SH_PC_8BIT_OFFSET(inst);
610 return 0;
611 }
612
613 if ((inst & 0xe000) == 0xa000) {
614 regs->pc += SH_PC_12BIT_OFFSET(inst);
615 return 0;
616 }
617
618 if ((inst & 0xf0df) == 0x0003) {
619 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
620 return 0;
621 }
622
623 if ((inst & 0xf0df) == 0x400b) {
624 regs->pc = regs->regs[(inst & 0x0f00) >> 8];
625 return 0;
626 }
627
628 if ((inst & 0xffff) == 0x000b) {
629 regs->pc = regs->pr;
630 return 0;
631 }
632
633 return 1;
634}
635#endif

asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst;

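	/*
	 * The offending instruction sits in the delay slot at regs.pc + 2;
	 * if the math emulator handles it, the branch at regs.pc must then
	 * be emulated as well, since execution cannot restart inside a
	 * delay slot.
	 */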
	get_user(inst, (unsigned short *)regs.pc + 1);
	if (!do_fpu_inst(inst, &regs)) {
		get_user(inst, (unsigned short *)regs.pc);
		if (!emulate_branch(inst, &regs))
			return;
		/* fault in branch. */
	}
	/* not an FPU inst. */
#endif

	asm volatile("stc r2_bank, %0": "=r" (error_code));
	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("illegal slot instruction", &regs, error_code);
}

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs regs)
{
	long ex;
	asm volatile("stc r2_bank, %0" : "=r" (ex));
	die_if_kernel("exception", &regs, ex);
}

#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif

void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();
#endif

	/*
	 * NOTE: The VBR value should be in P1 (or P2, the virtual
	 * "fixed" address space); it should definitely not be a
	 * physical address.
	 */
	asm volatile("ldc %0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}

void __init trap_init(void)
{
	extern void *exception_handling_table[];

	exception_handling_table[TRAP_RESERVED_INST]
		= (void *)do_reserved_inst;
	exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
		= (void *)do_illegal_slot_inst;

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	/* entry 64 corresponds to EXPEVT=0x800 */
	exception_handling_table[64] = (void *)do_reserved_inst;
	exception_handling_table[65] = (void *)do_illegal_slot_inst;
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *stack, addr;
	unsigned long module_start = VMALLOC_START;
	unsigned long module_end = VMALLOC_END;
	int i = 1;

	if (tsk && !sp)
		sp = (unsigned long *)tsk->thread.sp;

	if (!sp) {
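		/*
		 * No stack supplied: read our own. r15 is the current
		 * stack pointer; r7_bank is assumed here to bound the
		 * current kernel stack. Note that the asm below reuses
		 * module_start/module_end as its output operands.
		 */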
		__asm__ __volatile__ (
			"mov r15, %0\n\t"
			"stc r7_bank, %1\n\t"
			: "=r" (module_start),
			  "=r" (module_end)
		);

		sp = (unsigned long *)module_start;
	}

	stack = sp;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	while (!kstack_end(stack)) {
		addr = *stack++;
		if (((addr >= (unsigned long)_text) &&
		     (addr <= (unsigned long)_etext)) ||
		    ((addr >= module_start) && (addr <= module_end))) {
			/*
			 * For an 80-column display, 6 entries per line is
			 * the maximum.
			 * NOTE: '[<8c00abcd>] ' consumes 13 columns.
			 */
#ifndef CONFIG_KALLSYMS
			if (i && ((i % 6) == 0))
				printk("\n ");
#endif
			printk("[<%08lx>] ", addr);
			print_symbol("%s\n", addr);
			i++;
		}
	}

	printk("\n");
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);