/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

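/*
 * A note on the frames these macros build (derived from the code
 * above): MCOUNT_ENTER() pushes five 32-bit values (r4-r7 and pr),
 * so the word the caller left on top of the stack ends up at
 * r15 + 20; that word is loaded into r4, and pr (the address we
 * were called from) goes into r5, which sets up the first two
 * argument registers for the tracer call. MCOUNT_LEAVE() pops the
 * same five values and returns, restoring r4 in the rts delay slot.
 */
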
#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow: if less than STACK_WARN + TI_SIZE bytes
 * (roughly 1KB with the default configuration) are left free, the
 * stack is treated as overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 */
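/*
 * Roughly, STACK_CHECK() implements the following C logic (a sketch
 * for illustration only; "sp" stands for r15, and the symbol names
 * match the constant pool at the bottom of this file):
 *
 *	if ((sp & (THREAD_SIZE - 1)) <= STACK_WARN + TI_SIZE)
 *		stack_panic();
 *
 *	if (sp <= _ebss) {
 *		if (sp < init_thread_union ||
 *		    sp >= init_thread_union + THREAD_SIZE)
 *			stack_panic();
 *	}
 */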
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
	/* If sp is past the end of init_stack, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

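/*
 * Profiling entry points. With -pg, the compiler inserts a call to
 * mcount (or _mcount, depending on the toolchain) at the entry of
 * every traced function, which is why both symbols are provided.
 */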
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
#ifndef CONFIG_DYNAMIC_FTRACE
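	/* Return immediately (via ftrace_stub) if tracing is stopped. */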
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif
	STACK_CHECK()

	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
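	/* Static ftrace: fetch the registered tracer, skipping the
	 * call entirely while it is still just the stub. */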
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
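	/*
	 * Jump to ftrace_graph_caller if either of the graph tracer
	 * hooks (ftrace_graph_return, ftrace_graph_entry) has been
	 * changed from its stub value.
	 */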
	mov.l	.Lftrace_graph_return, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	STACK_CHECK()

	MCOUNT_ENTER()

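	/*
	 * ftrace_call is the call site that dynamic ftrace patches at
	 * runtime to invoke the active tracer; until then it calls
	 * ftrace_stub.
	 */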
	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
	.align 2
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
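	/* Dump the stack, then panic("Stack error"); the string
	 * argument is loaded into r4 in the jsr delay slot. */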
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
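	/* Bail out through skip_trace (3f) if tracing has been stopped. */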
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

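	/* Pass a zero frame pointer argument to ftrace_return_to_handler(). */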
	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler is the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */