/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

static DEFINE_SPINLOCK(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
54
55/**
Steven Rostedt3d083392008-05-12 21:20:42 +020056 * clear_ftrace_function - reset the ftrace function
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020057 *
Steven Rostedt3d083392008-05-12 21:20:42 +020058 * This NULLs the ftrace function and in essence stops
59 * tracing. There may be lag
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020060 */
Steven Rostedt3d083392008-05-12 21:20:42 +020061void clear_ftrace_function(void)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020062{
Steven Rostedt3d083392008-05-12 21:20:42 +020063 ftrace_trace_function = ftrace_stub;
64}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	/*
	 * For one func, simply call it directly.
	 * For more than one func, call the chain.
	 */
	if (ops->next == &ftrace_list_end)
		ftrace_trace_function = ops->func;
	else
		ftrace_trace_function = ftrace_list_func;

	spin_unlock(&ftrace_lock);

	return 0;
}
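
/*
 * Aside -- a hypothetical sketch (compiled out) of the lockless
 * publish/consume pattern used above: the writer fills in the new
 * node, issues smp_wmb(), and only then publishes the pointer; the
 * reader's dependent load is ordered by read_barrier_depends(),
 * which is a no-op everywhere but alpha.
 */
#if 0
struct item {
	int payload;
	struct item *next;
};
static struct item *example_head;

static void example_publish(struct item *it, int val)
{
	it->payload = val;		/* initialize the node first... */
	it->next = example_head;
	smp_wmb();			/* order the writes... */
	example_head = it;		/* ...then make the node visible */
}

static int example_consume(void)
{
	struct item *it = example_head;

	read_barrier_depends();		/* pairs with the smp_wmb() above */
	return it ? it->payload : 0;
}
#endif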

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	/* If we only have one func left, then call that directly */
	if (ftrace_list == &ftrace_list_end ||
	    ftrace_list->next == &ftrace_list_end)
		ftrace_trace_function = ftrace_list->func;

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
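
/*
 * Aside -- a hypothetical sketch (compiled out) of the
 * indirect-pointer removal idiom used above.  Walking the list with
 * a pointer-to-pointer lets the head and interior nodes be unlinked
 * by the same assignment, with no "previous node" special case.
 */
#if 0
struct node {
	struct node *next;
};

static int example_remove(struct node **headp, struct node *victim)
{
	struct node **p;

	for (p = headp; *p; p = &(*p)->next)
		if (*p == victim) {
			*p = victim->next;	/* unlink in one step */
			return 0;
		}
	return -1;			/* not found */
}
#endif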

#ifdef CONFIG_DYNAMIC_FTRACE

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void notrace
ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. The arch alloc is responsible
	 * for seeing if the IP has already changed, and if
	 * it has, the alloc will fail.
	 */
	node = ftrace_alloc_shutdown_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
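
/*
 * Aside -- a hypothetical sketch (compiled out) of the per-CPU
 * recursion guard used above.  With preemption disabled, anything
 * that re-enters this path on the same CPU (say, from an interrupt)
 * sees the counter already raised and bails out instead of
 * recursing.
 */
#if 0
static DEFINE_PER_CPU(int, example_disable);

static void notrace example_entry(void)
{
	preempt_disable_notrace();

	if (__get_cpu_var(example_disable)++)	/* already inside? */
		goto out;

	/* ... non-reentrant work goes here ... */

 out:
	__get_cpu_var(example_disable)--;
	preempt_enable_notrace();
}
#endif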

static struct ftrace_ops ftrace_shutdown_ops __read_mostly = {
	.func = ftrace_record_ip,
};

static int notrace __ftrace_modify_code(void *data)
{
	void (*func)(void) = data;

	func();
	return 0;
}

static void notrace ftrace_run_startup_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
}

static void notrace ftrace_run_shutdown_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
}

static void notrace ftrace_startup(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend != 1)
		goto out;
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	ftrace_run_startup_code();
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (ftraced_suspend)
		goto out;

	ftrace_run_shutdown_code();

	__register_ftrace_function(&ftrace_shutdown_ops);
 out:
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	cycle_t start, stop;
	int i;

	/* Don't be calling ftrace ops now */
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftraced_lock);
		if (ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
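
/*
 * Aside -- a hypothetical sketch (compiled out) of the canonical
 * kthread polling loop used by ftraced.  The task state is set to
 * TASK_INTERRUPTIBLE *before* the exit condition is checked, so a
 * wakeup arriving in between is not lost; kthread_stop() then
 * terminates the loop cleanly.
 */
#if 0
static int example_thread(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule_timeout(HZ);		/* sleep up to a second */
		/* ... periodic work goes here ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#endif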

static int __init notrace ftrace_shutdown_init(void)
{
	struct task_struct *p;
	int ret;

	ret = ftrace_shutdown_arch_init();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return PTR_ERR(p);

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}

core_initcall(ftrace_shutdown_init);
#else
# define ftrace_startup()	do { } while (0)
# define ftrace_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	ftrace_startup();

	return __register_ftrace_function(ops);
}
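
/*
 * Usage sketch (hypothetical, compiled out): a minimal caller of the
 * API above.  Per the note in the kerneldoc, the callback and
 * everything it calls must be notrace, or each traced function it
 * touches would recurse straight back into it.
 */
#if 0
static notrace void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

/* register_ftrace_function(&my_ops); ... unregister_ftrace_function(&my_ops); */
#endif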

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	ret = __unregister_ftrace_function(ops);

	if (ftrace_list == &ftrace_list_end)
		ftrace_shutdown();

	return ret;
}