/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

#ifdef CONFIG_DYNAMIC_FTRACE
# define FTRACE_ENABLED_INIT 1
#else
# define FTRACE_ENABLED_INIT 0
#endif

int ftrace_enabled = FTRACE_ENABLED_INIT;
static int last_ftrace_enabled = FTRACE_ENABLED_INIT;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
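
/*
 * Illustrative sketch (an assumption about the arch code, not part of
 * this file): the per-arch assembly mcount stub, executed on every
 * function entry compiled with -pg, boils down to roughly:
 *
 *	if (ftrace_trace_function != ftrace_stub)
 *		ftrace_trace_function(ip, parent_ip);
 *
 * where ip is the address of the mcount call site in the traced
 * function and parent_ip is the return address into its caller.
 */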

notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
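
/*
 * Design note on the walk above: readers never take ftrace_lock.  The
 * list is terminated by the static ftrace_list_end sentinel instead of
 * NULL, so a reader racing with registration or removal always finds a
 * valid ->func and ->next to follow.
 */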

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub, which in essence stops
 * tracing.  There may be some lag before every CPU stops calling the
 * old function, since nothing synchronizes with concurrent callers.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int notrace __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
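
/*
 * Publication order enforced by the smp_wmb() above, as seen by a
 * racing reader on another CPU:
 *
 *	CPU 0 (register)		CPU 1 (ftrace_list_func)
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *
 * Without the barrier, CPU 1 could observe the new ftrace_list before
 * the store to ops->next is visible and walk through garbage.  The
 * read_barrier_depends() calls in ftrace_list_func() pair with this
 * smp_wmb().
 */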

static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
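
/*
 * The removal loop above uses the classic pointer-to-pointer idiom:
 * p walks the links rather than the nodes, so the entry is unlinked
 * with the single assignment '*p = (*p)->next' whether it sits at the
 * head (p == &ftrace_list) or in the middle (p is the address of the
 * previous entry's ->next), with no special case for either.
 */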

#ifdef CONFIG_DYNAMIC_FTRACE

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static inline int
notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void notrace
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void notrace
ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race here: ftraced may update the hash and
	 * reset it before we get the lock.  The arch alloc is
	 * responsible for checking whether the IP has already changed,
	 * and if it has, the alloc will fail.
	 */
	node = ftrace_alloc_shutdown_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
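
/*
 * Why the per-CPU counter above instead of a simple flag: this function
 * runs on every mcount call, including mcount calls made by anything it
 * invokes that is not itself marked notrace.  The counter turns such
 * same-CPU reentry into a cheap no-op, and preempt_disable_notrace()
 * keeps the task from migrating between the increment and decrement.
 */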

static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
{
	.func = ftrace_record_ip,
};

static int notrace __ftrace_modify_code(void *data)
{
	void (*func)(void) = data;

	func();
	return 0;
}

static void notrace ftrace_run_startup_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
}

static void notrace ftrace_run_shutdown_code(void)
{
	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
}
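
/*
 * stop_machine_run() runs __ftrace_modify_code() with every other CPU
 * spinning in a known state, so nothing can be executing kernel text
 * while ftrace_startup_code/ftrace_shutdown_code patch the mcount call
 * sites.  Passing NR_CPUS instead of a specific CPU means "run it on
 * any CPU".
 */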

static void notrace ftrace_startup(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend != 1)
		goto out;
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	if (ftrace_enabled)
		ftrace_run_startup_code();
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (ftraced_suspend)
		goto out;

	if (ftrace_enabled)
		ftrace_run_shutdown_code();

	__register_ftrace_function(&ftrace_shutdown_ops);
 out:
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_startup_sysctl(void)
{
	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		ftrace_run_startup_code();
	mutex_unlock(&ftraced_lock);
}

static void notrace ftrace_shutdown_sysctl(void)
{
	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		ftrace_run_shutdown_code();
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int notrace __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	cycle_t start, stop;
	int i;

	/* Don't be calling ftrace ops now */
	__unregister_ftrace_function(&ftrace_shutdown_ops);

	start = now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUs are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}

static void notrace ftrace_update_code(void)
{
	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int notrace ftraced(void *ignore)
{
	unsigned long usecs;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		/* check once a second */
		schedule_timeout(HZ);

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				WARN_ON_ONCE(1);
				/* reset after printing, so the total is reported */
				ftrace_update_tot_cnt = 0;
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		ftrace_shutdown_replenish();

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init notrace ftrace_shutdown_init(void)
{
	struct task_struct *p;
	int ret;

	ret = ftrace_shutdown_arch_init();
	if (ret)
		return ret;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p))
		return -1;

	__register_ftrace_function(&ftrace_shutdown_ops);

	return 0;
}

core_initcall(ftrace_shutdown_init);
#else
# define ftrace_startup()	  do { } while (0)
# define ftrace_shutdown()	  do { } while (0)
# define ftrace_startup_sysctl()  do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ftrace_startup();

	ret = __register_ftrace_function(ops);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
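
/*
 * Minimal usage sketch (illustrative only; my_trace, my_ops and
 * my_hits are hypothetical names, not part of this file):
 *
 *	static atomic_t my_hits;
 *
 *	static notrace void my_trace(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hits);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */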

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);

	if (ftrace_list == &ftrace_list_end)
		ftrace_shutdown();

	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

notrace int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *filp, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
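
/*
 * This handler backs the ftrace_enabled sysctl, so tracing can be
 * toggled at run time from user space, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	(stop all tracing)
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	(restart tracing)
 *
 * The corresponding ctl_table entry is assumed to live in
 * kernel/sysctl.c; it is not part of this file.
 */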
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200519}