/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before other CPUs stop
 * calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page *next;
	unsigned long index;
	struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000
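
/*
 * Rough sizing sketch (illustrative, not part of the original file): with
 * 4K pages and a struct dyn_ftrace in the 16-32 byte range (its real size
 * is defined in <linux/ftrace.h> and depends on architecture and config),
 * ENTRIES_PER_PAGE works out to a few hundred records per page, so the
 * NR_TO_INIT guess of 10000 records is covered by a few dozen of the
 * pages pre-allocated in ftrace_dyn_table_alloc() below.
 */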

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			if (ftrace_code_disable(p))
				ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),
	FTRACE_ITER_NOTRACE = (1 << 2),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
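
/*
 * Usage sketch (illustrative only, not taken from this file; the pattern
 * strings are placeholders): an in-kernel tracer could narrow what gets
 * traced with the same glob syntax that ftrace_match() parses above:
 *
 *	static unsigned char filter[] = "schedule*";
 *	static unsigned char skip[] = "*_stat";
 *
 *	ftrace_set_filter(filter, sizeof(filter) - 1, 1);
 *	ftrace_set_notrace(skip, sizeof(skip) - 1, 1);
 *
 * Calling either function with a NULL buffer and a non-zero reset simply
 * clears the corresponding FILTER/NOTRACE flags on every record again.
 */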

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
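
/*
 * Registration sketch (illustrative only, not part of this file; the
 * my_* names are made up): a minimal user provides a callback and an
 * ftrace_ops. Per the note above register_ftrace_function(), the callback
 * and everything it calls must be notrace to avoid recursing into ftrace:
 *
 *	static atomic_long_t my_call_count;
 *
 *	static void notrace my_trace_call(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_long_inc(&my_call_count);
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly =
 *	{
 *		.func = my_trace_call,
 *	};
 *
 *	...
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */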

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
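
/*
 * Note (added for illustration): the handler above is hooked to the
 * "ftrace_enabled" entry of the kernel sysctl table (see kernel/sysctl.c),
 * so from user space the same switch is normally flipped with e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 *
 * which ends up calling ftrace_enable_sysctl() with write != 0.
 */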