/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before other CPUs stop
 * calling into the old trace function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};
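
/*
 * These flags are OR-ed together into the "command" value handed to
 * ftrace_run_update_code() below; ftrace_force_shutdown(), for example,
 * passes FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC.
 */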

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page *next;
	unsigned long index;
	struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000
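
/*
 * Rough sizing sketch (illustrative only; the real numbers depend on the
 * architecture's PAGE_SIZE and sizeof(struct dyn_ftrace)): assuming 4 KiB
 * pages and a 32-byte dyn_ftrace record, ENTRIES_PER_PAGE comes out to
 * about 127, so covering NR_TO_INIT (10000) call sites takes roughly 80
 * pages from ftrace_dyn_table_alloc().
 */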

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

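/*
 * Summary of what __ftrace_replace_code() below does when filtering is
 * active (a restatement of the comment inside the function):
 *
 *	FILTER	ENABLED		action
 *	------	-------		-------------------------------------
 *	 set	 set		nothing to do
 *	 set	 clear		patch the call in (enable the site)
 *	 clear	 clear		nothing to do
 *	 clear	 set		patch the nop back (disable the site)
 */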
static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

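/*
 * ftrace_match() below implements a small glob subset; depending on
 * where the '*' appears the pattern means (symbol names here are only
 * illustrative):
 *
 *	"schedule"	MATCH_FULL:        name must match exactly
 *	"sched_*"	MATCH_FRONT_ONLY:  name must start with "sched_"
 *	"*_timer"	MATCH_END_ONLY:    name must end with "_timer"
 *	"*sched*"	MATCH_MIDDLE_ONLY: name must contain "sched"
 */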
static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
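
/*
 * Illustrative in-kernel use only (no caller in this file): a tracer
 * that only cares about the scheduler could do something like
 *
 *	static unsigned char sched_filter[] = "sched*";
 *
 *	ftrace_set_filter(sched_filter, sizeof(sched_filter) - 1, 1);
 *
 * which clears any previous filter and then limits tracing to functions
 * whose names start with "sched".
 */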

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);
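
/*
 * The two files above are meant to be driven from user space, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug and that
 * tracing_init_dentry() placed them in a "tracing" directory; both
 * are assumptions of this example, not guarantees of this file):
 *
 *	# cat /sys/kernel/debug/tracing/available_filter_functions
 *	# echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 */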

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
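
/*
 * Minimal usage sketch (illustrative only; "my_trace_func" and
 * "my_trace_ops" are made-up names, not part of this file).  The
 * callback, and everything it calls, must be notrace as the kernel-doc
 * above explains:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip);
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 */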

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}