/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* When ftrace_pid_trace is >= 0, only the thread with this pid is traced */
static int ftrace_pid_trace = -1;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

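/*
 * Used when tracing is limited to a single pid: call the saved trace
 * function only when the current task matches ftrace_pid_trace.
 */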
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before other CPUs stop
 * calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

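/*
 * Re-select the live trace function after the pid filter changes: wrap
 * the current function with ftrace_pid_func when a pid is set, or put
 * back the saved function when pid tracing is turned off.
 */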
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func != ftrace_pid_func)
			goto out;

		/* restore the function saved when the pid filter was set */
		func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

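/* Return a record to the free list; rec->ip doubles as the next pointer. */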
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

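/* Dump the instruction bytes found at a call site that failed to patch. */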
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
		ftrace_addr = (unsigned long)ftrace_caller;
	else
		ftrace_addr = (unsigned long)ftrace_graph_caller;
#else
	ftrace_addr = (unsigned long)ftrace_caller;
#endif

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

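/*
 * Walk every recorded mcount call site and enable or disable it,
 * skipping records that are free, failed, or sitting under a kprobe.
 */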
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

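/*
 * Run the requested modifications under stop_machine() so that no other
 * CPU is executing the instructions while they are being patched.
 */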
static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch the mcount call with a NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

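/*
 * seq_file iterator over all recorded functions. It backs the
 * available_filter_functions, failures, set_ftrace_filter and
 * set_ftrace_notrace files in debugfs.
 */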
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = 0;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = 0;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

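/*
 * Mark every function whose name matches the pattern. Simple glob
 * support: "foo*" matches a prefix, "*foo" a suffix, "*foo*" a
 * substring; a pattern without '*' must match the name exactly.
 */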
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

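/*
 * Parse whitespace-separated patterns written to set_ftrace_filter or
 * set_ftrace_notrace and hand each completed pattern to ftrace_match().
 */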
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

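/*
 * Record every mcount call site listed in [start, end) and then patch
 * each one to a nop via ftrace_update_code().
 */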
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

Steven Rostedtdf4fc312008-11-26 00:16:23 -05001462static ssize_t
1463ftrace_pid_read(struct file *file, char __user *ubuf,
1464 size_t cnt, loff_t *ppos)
1465{
1466 char buf[64];
1467 int r;
1468
1469 if (ftrace_pid_trace >= 0)
1470 r = sprintf(buf, "%u\n", ftrace_pid_trace);
1471 else
1472 r = sprintf(buf, "no pid\n");
1473
1474 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1475}
1476
1477static ssize_t
1478ftrace_pid_write(struct file *filp, const char __user *ubuf,
1479 size_t cnt, loff_t *ppos)
1480{
1481 char buf[64];
1482 long val;
1483 int ret;
1484
1485 if (cnt >= sizeof(buf))
1486 return -EINVAL;
1487
1488 if (copy_from_user(&buf, ubuf, cnt))
1489 return -EFAULT;
1490
1491 buf[cnt] = 0;
1492
1493 ret = strict_strtol(buf, 10, &val);
1494 if (ret < 0)
1495 return ret;
1496
1497 mutex_lock(&ftrace_start_lock);
 1498	if (val < 0) {
1499 /* disable pid tracing */
1500 if (ftrace_pid_trace < 0)
1501 goto out;
1502 ftrace_pid_trace = -1;
1503
1504 } else {
1505
1506 if (ftrace_pid_trace == val)
1507 goto out;
1508
1509 ftrace_pid_trace = val;
1510 }
1511
1512 /* update the function call */
1513 ftrace_update_pid_func();
1514 ftrace_startup_enable(0);
1515
1516 out:
1517 mutex_unlock(&ftrace_start_lock);
1518
1519 return cnt;
1520}
1521
1522static struct file_operations ftrace_pid_fops = {
1523 .read = ftrace_pid_read,
1524 .write = ftrace_pid_write,
1525};
1526
1527static __init int ftrace_init_debugfs(void)
1528{
1529 struct dentry *d_tracer;
1530 struct dentry *entry;
1531
1532 d_tracer = tracing_init_dentry();
1533 if (!d_tracer)
1534 return 0;
1535
1536 ftrace_init_dyn_debugfs(d_tracer);
1537
1538 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1539 NULL, &ftrace_pid_fops);
1540 if (!entry)
1541 pr_warning("Could not create debugfs "
1542 "'set_ftrace_pid' entry\n");
1543 return 0;
1544}
1545
1546fs_initcall(ftrace_init_debugfs);
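
/*
 * Usage sketch for the debugfs file created above (illustrative only;
 * paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid
 *		(trace only the thread with this pid)
 *	echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 *		(go back to tracing all threads)
 */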
1547
Steven Rostedt3d083392008-05-12 21:20:42 +02001548/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04001549 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001550 *
 1551 * This function should be used by panic code. It stops ftrace
 1552 * but in a not so nice way: it takes no locks and does no clean
 1553 * shutdown, so it is safe to call even from atomic context.
1554 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04001555void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001556{
1557 ftrace_disabled = 1;
1558 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001559 clear_ftrace_function();
1560}
1561
1562/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001563 * register_ftrace_function - register a function for profiling
 1564 * @ops: ops structure that holds the function for profiling.
 1565 *
 1566 * Register a function to be called on entry of every traceable
 1567 * function in the kernel.
1568 *
1569 * Note: @ops->func and all the functions it calls must be labeled
1570 * with "notrace", otherwise it will go into a
1571 * recursive loop.
1572 */
1573int register_ftrace_function(struct ftrace_ops *ops)
1574{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001575 int ret;
1576
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001577 if (unlikely(ftrace_disabled))
1578 return -1;
1579
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001580 mutex_lock(&ftrace_sysctl_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001581
1582 if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
1583 ret = -EBUSY;
1584 goto out;
1585 }
1586
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001587 ret = __register_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001588 ftrace_startup();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001589
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001590out:
1591 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001592 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001593}
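
/*
 * Illustrative usage sketch (not part of this file; my_trace_func and
 * my_ops are made-up names). The callback matches ftrace_func_t and,
 * as noted above, must be notrace:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...	(must only call notrace code)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */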
1594
1595/**
 1596 * unregister_ftrace_function - unregister a function for profiling.
 1597 * @ops: ops structure that holds the function to unregister
1598 *
1599 * Unregister a function that was added to be called by ftrace profiling.
1600 */
1601int unregister_ftrace_function(struct ftrace_ops *ops)
1602{
1603 int ret;
1604
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001605 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001606 ret = __unregister_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001607 ftrace_shutdown();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001608 mutex_unlock(&ftrace_sysctl_lock);
1609
1610 return ret;
1611}
1612
Ingo Molnare309b412008-05-12 21:20:51 +02001613int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001614ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001615 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001616 loff_t *ppos)
1617{
1618 int ret;
1619
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001620 if (unlikely(ftrace_disabled))
1621 return -ENODEV;
1622
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001623 mutex_lock(&ftrace_sysctl_lock);
1624
Steven Rostedt5072c592008-05-12 21:20:43 +02001625 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001626
1627 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1628 goto out;
1629
1630 last_ftrace_enabled = ftrace_enabled;
1631
1632 if (ftrace_enabled) {
1633
1634 ftrace_startup_sysctl();
1635
1636 /* we are starting ftrace again */
1637 if (ftrace_list != &ftrace_list_end) {
1638 if (ftrace_list->next == &ftrace_list_end)
1639 ftrace_trace_function = ftrace_list->func;
1640 else
1641 ftrace_trace_function = ftrace_list_func;
1642 }
1643
1644 } else {
1645 /* stopping ftrace calls (just send to ftrace_stub) */
1646 ftrace_trace_function = ftrace_stub;
1647
1648 ftrace_shutdown_sysctl();
1649 }
1650
1651 out:
1652 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001653 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001654}
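
/*
 * The handler above backs the ftrace_enabled sysctl, so function tracing
 * can be flipped at run time (paths assume the usual sysctl proc
 * interface):
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */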
Ingo Molnarf17845e2008-10-24 12:47:10 +02001655
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001656#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001657
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001658static atomic_t ftrace_graph_active;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001659
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001660/* The callbacks that hook a function */
1661trace_func_graph_ret_t ftrace_graph_return =
1662 (trace_func_graph_ret_t)ftrace_stub;
1663trace_func_graph_ent_t ftrace_graph_entry =
1664 (trace_func_graph_ent_t)ftrace_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001665
1666/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1667static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1668{
1669 int i;
1670 int ret = 0;
1671 unsigned long flags;
1672 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1673 struct task_struct *g, *t;
1674
1675 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1676 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1677 * sizeof(struct ftrace_ret_stack),
1678 GFP_KERNEL);
1679 if (!ret_stack_list[i]) {
1680 start = 0;
1681 end = i;
1682 ret = -ENOMEM;
1683 goto free;
1684 }
1685 }
1686
1687 read_lock_irqsave(&tasklist_lock, flags);
1688 do_each_thread(g, t) {
1689 if (start == end) {
1690 ret = -EAGAIN;
1691 goto unlock;
1692 }
1693
1694 if (t->ret_stack == NULL) {
1695 t->ret_stack = ret_stack_list[start++];
1696 t->curr_ret_stack = -1;
1697 atomic_set(&t->trace_overrun, 0);
1698 }
1699 } while_each_thread(g, t);
1700
1701unlock:
1702 read_unlock_irqrestore(&tasklist_lock, flags);
1703free:
1704 for (i = start; i < end; i++)
1705 kfree(ret_stack_list[i]);
1706 return ret;
1707}
1708
1709/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001710static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001711{
1712 struct ftrace_ret_stack **ret_stack_list;
1713 int ret;
1714
1715 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
1716 sizeof(struct ftrace_ret_stack *),
1717 GFP_KERNEL);
1718
1719 if (!ret_stack_list)
1720 return -ENOMEM;
1721
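	/*
	 * alloc_retstack_tasklist() returns -EAGAIN when more tasks
	 * without a return stack showed up than this batch could cover;
	 * allocate another batch and walk the task list again until
	 * every thread has one.
	 */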
1722 do {
1723 ret = alloc_retstack_tasklist(ret_stack_list);
1724 } while (ret == -EAGAIN);
1725
1726 kfree(ret_stack_list);
1727 return ret;
1728}
1729
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001730int register_ftrace_graph(trace_func_graph_ret_t retfunc,
1731 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01001732{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001733 int ret = 0;
1734
1735 mutex_lock(&ftrace_sysctl_lock);
1736
1737 /*
1738 * Don't launch return tracing if normal function
1739 * tracing is already running.
1740 */
1741 if (ftrace_trace_function != ftrace_stub) {
1742 ret = -EBUSY;
1743 goto out;
1744 }
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001745 atomic_inc(&ftrace_graph_active);
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001746 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001747 if (ret) {
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001748 atomic_dec(&ftrace_graph_active);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001749 goto out;
1750 }
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001751 ftrace_tracing_type = FTRACE_TYPE_RETURN;
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001752 ftrace_graph_return = retfunc;
1753 ftrace_graph_entry = entryfunc;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001754 ftrace_startup();
1755
1756out:
1757 mutex_unlock(&ftrace_sysctl_lock);
1758 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01001759}
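
/*
 * Illustrative usage sketch (made-up names; the callback prototypes are
 * given by the trace_func_graph_ret_t and trace_func_graph_ent_t
 * typedefs):
 *
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	if (ret)
 *		return ret;
 *	...
 *	unregister_ftrace_graph();
 */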
1760
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001761void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01001762{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001763 mutex_lock(&ftrace_sysctl_lock);
1764
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001765 atomic_dec(&ftrace_graph_active);
1766 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
1767 ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001768 ftrace_shutdown();
1769 /* Restore normal tracing type */
1770 ftrace_tracing_type = FTRACE_TYPE_ENTER;
1771
1772 mutex_unlock(&ftrace_sysctl_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01001773}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001774
1775/* Allocate a return stack for newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001776void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001777{
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001778 if (atomic_read(&ftrace_graph_active)) {
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001779 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
1780 * sizeof(struct ftrace_ret_stack),
1781 GFP_KERNEL);
1782 if (!t->ret_stack)
1783 return;
1784 t->curr_ret_stack = -1;
1785 atomic_set(&t->trace_overrun, 0);
1786 } else
1787 t->ret_stack = NULL;
1788}
1789
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001790void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001791{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01001792 struct ftrace_ret_stack *ret_stack = t->ret_stack;
1793
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001794 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01001795 /* NULL must become visible to IRQs before we free it: */
1796 barrier();
1797
1798 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001799}
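
/*
 * The two helpers above are hooks for the task lifecycle: init_task is
 * meant to run when a task is created and exit_task when it is torn
 * down, so every thread owns a return stack while the graph tracer is
 * active and frees it afterwards.
 */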
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01001800#endif
1801