/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

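/*
 * MCOUNT_ENTER() saves the caller-saved argument registers r4-r7 and
 * the procedure register (pr) on the stack, then sets up the two
 * arguments for the C trace callback: r4 gets the instrumented
 * function's return address, found at the top of the entry stack
 * (r15 + 20 once the five words above have been pushed), and r5 gets
 * the mcount call site taken from pr. MCOUNT_LEAVE() undoes all of
 * this, restoring r4 in the rts delay slot.
 */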
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15), r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow: if there are less than STACK_WARN bytes
 * of stack left above the thread_info at the bottom of the stack,
 * consider it overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
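	/* r0 = THREAD_SIZE ((THREAD_SIZE >> 10) << 10) */ \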
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
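	/* r2 = TI_SIZE + STACK_WARN, the low-water mark */ \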
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp is past init_stack + THREAD_SIZE but	\
	 * still below __bss_stop, we're not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

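/*
 * Every function compiled with -pg calls mcount (aliased as _mcount)
 * on entry. The stack sanity check runs unconditionally; the tracing
 * body below is only assembled when CONFIG_FUNCTION_TRACER is set.
 */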
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	nop
#else
#ifndef CONFIG_DYNAMIC_FTRACE
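	/* Bail out to the stub if tracing is stopped (function_trace_stop != 0). */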
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif

	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
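	/*
	 * mcount_call marks the load that dynamic ftrace rewrites at
	 * runtime; it starts out holding ftrace_stub, so the call below
	 * is a no-op until a tracer is installed.
	 */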
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	@r6, r6			! r6 = ftrace_trace_function
	mov.l	.Lftrace_stub, r7	! r7 = &ftrace_stub
	cmp/eq	r6, r7
	bt	skip_trace
#endif

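	/* Invoke the tracer with the arguments set up by MCOUNT_ENTER(). */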
	jsr	@r6
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
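	/*
	 * Hand off to ftrace_graph_caller if either the graph return
	 * or the graph entry hook has been changed from its stub.
	 */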
	mov.l	.Lftrace_graph_return, r6
	mov.l	@r6, r6			! r6 = ftrace_graph_return
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	@r6, r6			! r6 = ftrace_graph_entry
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

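/*
 * With dynamic ftrace, ftrace_caller is the traced-call entry point.
 * The jsr at ftrace_call below is redirected at runtime, through the
 * .Lftrace_stub literal, to the active tracer; see the displacement
 * note further down.
 */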
	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
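	/* Jump straight to skip_trace if function_trace_stop is set. */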
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

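	/*
	 * prepare_ftrace_return() may rewrite the return address slot
	 * passed in r4 so that the traced function returns through
	 * return_to_handler instead of its real caller.
	 */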
	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

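	/* r4 = 0: no frame pointer is passed to ftrace_return_to_handler(). */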
	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	nop

	/*
	 * The return value from ftrace_return_to_handler() is the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
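	/* Reached from STACK_CHECK() when the stack looks corrupt. */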
	mov.l	.Ldump_stack, r0
	jsr	@r0
	nop

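	/* panic("Stack error"); the argument is loaded in the jsr delay slot. */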
	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */