Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com) |
| 3 | * |
| 4 | * This file implements mcount(), which is used to collect profiling data. |
| 5 | * This can also be tweaked for kernel stack overflow detection. |
| 6 | */ |
| 7 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | #include <linux/linkage.h> |
| 9 | |
| 10 | #include <asm/ptrace.h> |
| 11 | #include <asm/thread_info.h> |
| 12 | |
| 13 | /* |
| 14 | * This is the main variant and is called by C code. GCC's -pg option |
| 15 | * automatically instruments every C function with a call to this. |
| 16 | */ |
| 17 | |
#ifdef CONFIG_STACK_DEBUG

/* Size of the emergency "panic" stack used only while reporting a
 * stack overflow.  Chosen ad-hoc; see the comment.
 */
#define OVSTACKSIZE	4096		/* lets hope this is enough */

	.data
	.align		8
panicstring:				/* message passed to prom_printf() on overflow */
	.asciz		"Stack overflow\n"
	.align		8
ovstack:				/* emergency stack; %sp is switched here so the
					 * overflow message can be printed safely */
	.skip		OVSTACKSIZE
#endif
	.text
	.align		32
	.globl		_mcount
	.type		_mcount,#function
	.globl		mcount
	.type		mcount,#function
/* _mcount/mcount: entry hook emitted by gcc -pg at the start of every
 * instrumented C function.
 *
 * On entry (per the sparc64 mcount convention visible in the arg setup
 * below): %o7 = address of the call site inside the instrumented
 * function, %i7 = return address into the instrumented function's
 * caller.  %g6 is used as the current thread_info base (TI_* offsets
 * are loaded from it).  Only %g1/%g2/%g3/%g7 and the out registers are
 * scratched; no register window is allocated.
 */
_mcount:
mcount:
#ifdef CONFIG_STACK_DEBUG
	/*
	 * Check whether %sp is dangerously low.
	 */
	ldub		[%g6 + TI_FPDEPTH], %g1	! nesting depth of saved-FP frames
	srl		%g1, 1, %g3
	add		%g3, 1, %g3		! number of 256-byte fpregs frames in use
	sllx		%g3, 8, %g3		! each fpregs frame is 256b
	add		%g3, 192, %g3		! plus one register window save area
	add		%g6, %g3, %g3		! where does task_struct+frame end?
	sub		%g3, STACK_BIAS, %g3	! convert to a biased %sp value for compare
	cmp		%sp, %g3
	bg,pt		%xcc, 1f		! %sp above the danger line: done
	 nop
	/* %sp is below the thread stack limit.  Before panicking, make
	 * sure we are not legitimately running on this CPU's hardirq or
	 * softirq stack (indexed by TI_CPU, 8 bytes per pointer).
	 */
	lduh		[%g6 + TI_CPU], %g1
	sethi		%hi(hardirq_stack), %g3
	or		%g3, %lo(hardirq_stack), %g3
	sllx		%g1, 3, %g1		! %g1 = cpu * sizeof(void *)
	ldx		[%g3 + %g1], %g7	! %g7 = hardirq_stack[cpu]
	sub		%g7, STACK_BIAS, %g7
	cmp		%sp, %g7
	bleu,pt		%xcc, 2f		! below hardirq stack base: try softirq
	 sethi		%hi(THREAD_SIZE), %g3
	add		%g7, %g3, %g7		! %g7 = end of hardirq stack
	cmp		%sp, %g7
	blu,pn		%xcc, 1f		! inside [base, base+THREAD_SIZE): OK
2:	 sethi		%hi(softirq_stack), %g3
	or		%g3, %lo(softirq_stack), %g3
	ldx		[%g3 + %g1], %g7	! %g7 = softirq_stack[cpu]
	sub		%g7, STACK_BIAS, %g7
	cmp		%sp, %g7
	bleu,pt		%xcc, 3f		! below softirq stack too: real overflow
	 sethi		%hi(THREAD_SIZE), %g3
	add		%g7, %g3, %g7		! %g7 = end of softirq stack
	cmp		%sp, %g7
	blu,pn		%xcc, 1f		! inside the softirq stack: OK
	 nop
	/* If we are already on ovstack, don't hop onto it
	 * again, we are already trying to output the stack overflow
	 * message.
	 */
3:	sethi		%hi(ovstack), %g7	! cant move to panic stack fast enough
	 or		%g7, %lo(ovstack), %g7
	add		%g7, OVSTACKSIZE, %g3	! %g3 = top of ovstack
	sub		%g3, STACK_BIAS + 192, %g3 ! leave room for one save area
	sub		%g7, STACK_BIAS, %g7	! %g7 = biased base of ovstack
	cmp		%sp, %g7
	blu,pn		%xcc, 2f		! %sp below ovstack: switch to it
	 cmp		%sp, %g3
	bleu,pn		%xcc, 1f		! already within ovstack: don't recurse
	 nop
2:	mov		%g3, %sp		! run the report on the emergency stack
	sethi		%hi(panicstring), %g3
	call		prom_printf
	 or		%g3, %lo(panicstring), %o0
	call		prom_halt		! does not return
	 nop
1:
#endif
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
	/* Do nothing, the retl/nop below is all we need.  */
#else
	/* Static ftrace: bail out while tracing is stopped, otherwise
	 * tail-call the registered tracer unless it is still the
	 * default ftrace_stub.
	 */
	sethi		%hi(function_trace_stop), %g1
	lduw		[%g1 + %lo(function_trace_stop)], %g2
	brnz,pn		%g2, 1f			! tracing stopped: plain return
	 sethi		%hi(ftrace_trace_function), %g1
	sethi		%hi(ftrace_stub), %g2
	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
	or		%g2, %lo(ftrace_stub), %g2
	cmp		%g1, %g2
	be,pn		%icc, 1f		! no real tracer installed
	 mov		%i7, %o1		! arg1 = parent return address
	jmpl		%g1, %g0		! tail-call tracer; it returns for us
	 mov		%o7, %o0		! arg0 = call site in traced function
	/* not reached */
1:
#endif
#endif
	retl
	 nop
	.size		_mcount,.-_mcount
	.size		mcount,.-mcount
| 121 | |
#ifdef CONFIG_FUNCTION_TRACER
/* ftrace_stub: the default no-op tracer target.  Also used as the
 * early-out branch target in ftrace_caller below.
 */
	.globl		ftrace_stub
	.type		ftrace_stub,#function
ftrace_stub:
	retl
	 nop
	.size		ftrace_stub,.-ftrace_stub
#ifdef CONFIG_DYNAMIC_FTRACE
/* ftrace_caller: with dynamic ftrace, call sites are patched at runtime
 * to call here instead of mcount.  Sets up the tracer arguments
 * (%o0 = call site address from %o7, %o1 = parent address from %i7),
 * skipping straight to ftrace_stub while function_trace_stop is set.
 *
 * NOTE(review): the instruction at ftrace_call is itself a runtime
 * patch site (ftrace rewrites the call target), so the exact layout
 * here must not change — presumably kept in sync with the arch's
 * ftrace_modify_code implementation; verify before touching.
 */
	.globl		ftrace_caller
	.type		ftrace_caller,#function
ftrace_caller:
	sethi		%hi(function_trace_stop), %g1
	mov		%i7, %o1		! arg1 = parent return address
	lduw		[%g1 + %lo(function_trace_stop)], %g2
	brnz,pn		%g2, ftrace_stub	! tracing stopped: no-op return
	 mov		%o7, %o0		! arg0 = call site (delay slot)
	.globl		ftrace_call
ftrace_call:
	/* If the final kernel link ever turns on relaxation, we'll need
	 * to do something about this tail call.  Otherwise the linker
	 * will rewrite the call into a branch and nop out the move
	 * instruction.
	 */
	call		ftrace_stub		! patched to the live tracer at runtime
	 mov		%o0, %o7		! restore return linkage in the delay slot
	retl
	 nop
	.size		ftrace_call,.-ftrace_call
	.size		ftrace_caller,.-ftrace_caller
#endif
#endif