/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the ftrace_stub and in essence
 * stops tracing.  There may be a lag before every CPU stops calling
 * the previously registered function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

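/*
 * Registration keeps every callback on ftrace_list.  With a single
 * ftrace_ops registered, ftrace_trace_function points straight at
 * ops->func; with more than one, it points at ftrace_list_func(),
 * which walks the whole chain.  A tracer hooks in roughly like this
 * (sketch only -- register_ftrace_function() is the exported wrapper
 * defined elsewhere in this file, and the callback must be reentrant
 * and must not recurse into traced code):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */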
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, when the call sites are discovered at
 * run time by the callers themselves and not recorded at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)flags; } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif

static struct task_struct *ftraced_task;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

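/*
 * Hash of dyn_ftrace records, keyed by hash_long(ip, FTRACE_HASHBITS).
 * ftrace_record_ip() uses it to remember which mcount call sites have
 * already been recorded, so a site that fires again is not added twice.
 */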
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

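/*
 * dyn_ftrace records are carved out of whole pages: each ftrace_page
 * is one page from get_zeroed_page(), holding ENTRIES_PER_PAGE records
 * after the small header that links the pages together.  As a rough,
 * architecture-dependent estimate, a 4 KiB page holds on the order of
 * a hundred records.
 */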
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int			ftraced_trigger;
static int			ftraced_suspend;
static int			ftraced_stop;

static int			ftrace_record_suspend;

static struct dyn_ftrace	*ftrace_free_records;


#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

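/*
 * skip_trace() reports whether tracing of @ip should be suppressed.
 * It only ever returns non-zero for a frozen record (one whose mcount
 * site carries a kprobe) that is currently in a state -- failed,
 * unconverted, tracing switched off, or filtered out -- in which the
 * site must not be traced.
 */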
int skip_trace(unsigned long ip)
{
	unsigned long fl;
	struct dyn_ftrace *rec;
	struct hlist_node *t;
	struct hlist_head *head;

	if (frozen_record_count == 0)
		return 0;

	head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
	hlist_for_each_entry_rcu(rec, t, head, node) {
		if (rec->ip == ip) {
			if (record_frozen(rec)) {
				if (rec->flags & FTRACE_FL_FAILED)
					return 1;

				if (!(rec->flags & FTRACE_FL_CONVERTED))
					return 1;

				if (!tracing_on || !ftrace_enabled)
					return 1;

				if (ftrace_filtered) {
					fl = rec->flags & (FTRACE_FL_FILTER |
							   FTRACE_FL_NOTRACE);
					if (!fl || (fl & FTRACE_FL_NOTRACE))
						return 1;
				}
			}
			break;
		}
	}

	return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
	hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);

}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	ftrace_hash_lock(flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	ftrace_hash_unlock(flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

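/*
 * Quick reference for the flag logic in __ftrace_replace_code() when
 * filtering is active and tracing is being enabled:
 *
 *  - nothing is done when the record is already in the desired state:
 *    FILTER and ENABLED both set, FILTER and NOTRACE both set, no flag
 *    set at all, or only NOTRACE set;
 *  - otherwise the call site is flipped: an ENABLED record is patched
 *    back to a nop, anything else is patched to call ftrace_caller.
 *
 * Without filtering, a record simply follows the @enable argument,
 * except that NOTRACE keeps a disabled record disabled.
 */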
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

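/*
 * ftrace_replace_code() is only called from __ftrace_modify_code(),
 * which ftrace_run_update_code() runs under stop_machine().  Every
 * other CPU is parked while the records are walked, so the call sites
 * can be patched without racing against code that is executing them.
 */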
static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_del_hash(rec);
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_update_code(void *ignore);

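/*
 * __ftrace_modify_code() runs with the machine stopped and acts on a
 * bitmask of the FTRACE_* commands defined above.  For example, the
 * first ftrace_startup() below typically passes
 * FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC: convert any freshly
 * recorded call sites, patch the calls in, and update the function
 * that the patched sites will call, all in one stop_machine() run.
 */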
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		/*
		 * Update any recorded ips now that we have the
		 * machine stopped
		 */
		__ftrace_update_code(NULL);
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

void ftrace_disable_daemon(void)
{
	/* Stop the daemon from calling kstop_machine */
	mutex_lock(&ftraced_lock);
	ftraced_stop = 1;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
	mutex_lock(&ftraced_lock);
	ftraced_stop = 0;
	mutex_unlock(&ftraced_lock);

	ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

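/*
 * Conversion happens in two stages: mcount call sites are first
 * recorded into ftrace_hash (by ftrace_record_ip() at run time, or by
 * ftrace_convert_nops() from a build-time table), and then
 * __ftrace_update_code(), running with the machine stopped, patches
 * each recorded site to a nop and marks the record converted.
 */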
static int __ftrace_update_code(void *ignore)
{
	int i, save_ftrace_enabled;
	cycle_t start, stop;
	struct dyn_ftrace *p;
	struct hlist_node *t, *n;
	struct hlist_head *head, temp_list;

	/* Don't be recording funcs now */
	ftrace_record_suspend++;
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		INIT_HLIST_HEAD(&temp_list);
		head = &ftrace_hash[i];

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry_safe(p, t, n, head, node) {
			/* Skip over failed records which have not been
			 * freed. */
			if (p->flags & FTRACE_FL_FAILED)
				continue;

			/* Unconverted records are always at the head of the
			 * hash bucket. Once we encounter a converted record,
			 * simply skip over to the next bucket. Saves ftraced
			 * some processor cycles (ftrace does its bid for
			 * global warming :-p ). */
			if (p->flags & (FTRACE_FL_CONVERTED))
				break;

			/* Ignore updates to this record's mcount site.
			 * Reintroduce this record at the head of this
			 * bucket to attempt to "convert" it again if
			 * the kprobe on it is unregistered before the
			 * next run. */
			if (get_kprobe((void *)p->ip)) {
				ftrace_del_hash(p);
				INIT_HLIST_NODE(&p->node);
				hlist_add_head(&p->node, &temp_list);
				freeze_record(p);
				continue;
			} else {
				unfreeze_record(p);
			}

			/* convert record (i.e, patch mcount-call with NOP) */
			if (ftrace_code_disable(p)) {
				p->flags |= FTRACE_FL_CONVERTED;
				ftrace_update_cnt++;
			} else {
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(p->ip)) {
					ftrace_del_hash(p);
					ftrace_free_rec(p);
				}
			}
		}

		hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
			hlist_del(&p->node);
			INIT_HLIST_NODE(&p->node);
			hlist_add_head(&p->node, head);
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;
	ftraced_trigger = 0;

	ftrace_enabled = save_ftrace_enabled;
	ftrace_record_suspend--;

	return 0;
}

static int ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled) ||
	    !ftrace_enabled || !ftraced_trigger)
		return 0;

	stop_machine(__ftrace_update_code, NULL, NULL);

	return 1;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld hash entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

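/*
 * ftrace_match() implements simple glob matching over the recorded
 * function names: "foo" selects an exact match (MATCH_FULL), "foo*"
 * matches by prefix (MATCH_FRONT_ONLY), "*foo" by suffix
 * (MATCH_END_ONLY) and "*foo*" by substring (MATCH_MIDDLE_ONLY).
 * Matching records get FTRACE_FL_FILTER or FTRACE_FL_NOTRACE set,
 * depending on @enable.
 */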
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
	     size_t cnt, loff_t *ppos)
{
	/* don't worry about races */
	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
	int r = strlen(buf);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	if (strncmp(buf, "enable", 6) == 0)
		val = 1;
	else if (strncmp(buf, "disable", 7) == 0)
		val = 0;
	else {
		buf[cnt] = 0;

		ret = strict_strtoul(buf, 10, &val);
		if (ret < 0)
			return ret;

		val = !!val;
	}

	if (val)
		ftrace_enable_daemon();
	else
		ftrace_disable_daemon();

	filp->f_pos += cnt;

	return cnt;
}
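/*
 * The "ftraced_enabled" debugfs file below accepts either a keyword or
 * a number, so (assuming debugfs is mounted at /sys/kernel/debug and
 * the tracing directory has been set up) both of these stop the daemon:
 *
 *	echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *	echo 0       > /sys/kernel/debug/tracing/ftraced_enabled
 */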
1439
Steven Rostedt5072c592008-05-12 21:20:43 +02001440static struct file_operations ftrace_avail_fops = {
1441 .open = ftrace_avail_open,
1442 .read = seq_read,
1443 .llseek = seq_lseek,
1444 .release = ftrace_avail_release,
1445};
1446
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301447static struct file_operations ftrace_failures_fops = {
1448 .open = ftrace_failures_open,
1449 .read = seq_read,
1450 .llseek = seq_lseek,
1451 .release = ftrace_avail_release,
1452};
1453
Steven Rostedt5072c592008-05-12 21:20:43 +02001454static struct file_operations ftrace_filter_fops = {
1455 .open = ftrace_filter_open,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001456 .read = ftrace_regex_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02001457 .write = ftrace_filter_write,
Steven Rostedt41c52c02008-05-22 11:46:33 -04001458 .llseek = ftrace_regex_lseek,
Steven Rostedt5072c592008-05-12 21:20:43 +02001459 .release = ftrace_filter_release,
1460};
1461
Steven Rostedt41c52c02008-05-22 11:46:33 -04001462static struct file_operations ftrace_notrace_fops = {
1463 .open = ftrace_notrace_open,
1464 .read = ftrace_regex_read,
1465 .write = ftrace_notrace_write,
1466 .llseek = ftrace_regex_lseek,
1467 .release = ftrace_notrace_release,
1468};
1469
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001470static struct file_operations ftraced_fops = {
1471 .open = tracing_open_generic,
1472 .read = ftraced_read,
1473 .write = ftraced_write,
1474};
1475
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001476/**
1477 * ftrace_force_update - force an update to all recording ftrace functions
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001478 */
1479int ftrace_force_update(void)
1480{
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001481 int ret = 0;
1482
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001483 if (unlikely(ftrace_disabled))
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001484 return -ENODEV;
1485
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001486 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001487 mutex_lock(&ftraced_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001488
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001489 /*
1490 * If ftraced_trigger is not set, then there is nothing
1491 * to update.
1492 */
1493 if (ftraced_trigger && !ftrace_update_code())
1494 ret = -EBUSY;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001495
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001496 mutex_unlock(&ftraced_lock);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001497 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001498
1499 return ret;
1500}
1501
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001502static void ftrace_force_shutdown(void)
1503{
1504 struct task_struct *task;
1505 int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1506
1507 mutex_lock(&ftraced_lock);
1508 task = ftraced_task;
1509 ftraced_task = NULL;
1510 ftraced_suspend = -1;
1511 ftrace_run_update_code(command);
1512 mutex_unlock(&ftraced_lock);
1513
1514 if (task)
1515 kthread_stop(task);
1516}
1517
Steven Rostedt5072c592008-05-12 21:20:43 +02001518static __init int ftrace_init_debugfs(void)
1519{
1520 struct dentry *d_tracer;
1521 struct dentry *entry;
1522
1523 d_tracer = tracing_init_dentry();
1524
1525 entry = debugfs_create_file("available_filter_functions", 0444,
1526 d_tracer, NULL, &ftrace_avail_fops);
1527 if (!entry)
1528 pr_warning("Could not create debugfs "
1529 "'available_filter_functions' entry\n");
1530
Abhishek Sagareb9a7bf2008-06-01 21:47:54 +05301531 entry = debugfs_create_file("failures", 0444,
1532 d_tracer, NULL, &ftrace_failures_fops);
1533 if (!entry)
1534 pr_warning("Could not create debugfs 'failures' entry\n");
1535
Steven Rostedt5072c592008-05-12 21:20:43 +02001536 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1537 NULL, &ftrace_filter_fops);
1538 if (!entry)
1539 pr_warning("Could not create debugfs "
1540 "'set_ftrace_filter' entry\n");
Steven Rostedt41c52c02008-05-22 11:46:33 -04001541
1542 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1543 NULL, &ftrace_notrace_fops);
1544 if (!entry)
1545 pr_warning("Could not create debugfs "
1546 "'set_ftrace_notrace' entry\n");
Steven Rostedtad90c0e2008-05-27 20:48:37 -04001547
1548 entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1549 NULL, &ftraced_fops);
1550 if (!entry)
1551 pr_warning("Could not create debugfs "
1552 "'ftraced_enabled' entry\n");
Steven Rostedt5072c592008-05-12 21:20:43 +02001553 return 0;
1554}
1555
1556fs_initcall(ftrace_init_debugfs);
1557
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001558#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1559static int ftrace_convert_nops(unsigned long *start,
1560 unsigned long *end)
1561{
1562 unsigned long *p;
1563 unsigned long addr;
1564 unsigned long flags;
1565
1566 p = start;
1567 while (p < end) {
1568 addr = ftrace_call_adjust(*p++);
Steven Rostedt99ecdc42008-08-15 21:40:05 -04001569 /* should not be called from interrupt context */
Steven Rostedtfed19392008-08-14 22:47:19 -04001570 spin_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001571 ftrace_record_ip(addr);
Steven Rostedtfed19392008-08-14 22:47:19 -04001572 spin_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001573 ftrace_shutdown_replenish();
1574 }
1575
 1576 /* __ftrace_update_code ignores its argument; p merely satisfies the prototype */
1577 local_irq_save(flags);
1578 __ftrace_update_code(p);
1579 local_irq_restore(flags);
1580
1581 return 0;
1582}
1583
Steven Rostedt90d595f2008-08-14 15:45:09 -04001584void ftrace_init_module(unsigned long *start, unsigned long *end)
1585{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04001586 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04001587 return;
Steven Rostedt90d595f2008-08-14 15:45:09 -04001588 ftrace_convert_nops(start, end);
1589}
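
/*
 * A sketch of the expected caller (hypothetical symbol names): the module
 * loader hands in the module's __mcount_loc section, which is simply an
 * array of addresses of mcount call sites.
 */
#if 0
extern unsigned long mod_mcount_start[];	/* start of the module's __mcount_loc */
extern unsigned long mod_mcount_end[];		/* end of the module's __mcount_loc */

static void example_module_hook(void)
{
	ftrace_init_module(mod_mcount_start, mod_mcount_end);
}
#endif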
1590
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001591extern unsigned long __start_mcount_loc[];
1592extern unsigned long __stop_mcount_loc[];
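
/*
 * __start_mcount_loc and __stop_mcount_loc are emitted by the linker
 * script, which gathers every object file's __mcount_loc records between
 * the two symbols.  Roughly (a sketch of the linker-script macro, not a
 * verbatim copy):
 *
 *	__start_mcount_loc = .;
 *	*(__mcount_loc)
 *	__stop_mcount_loc = .;
 */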
1593
1594void __init ftrace_init(void)
1595{
1596 unsigned long count, addr, flags;
1597 int ret;
1598
1599 /* Keep the ftrace pointer to the stub */
1600 addr = (unsigned long)ftrace_stub;
1601
1602 local_irq_save(flags);
1603 ftrace_dyn_arch_init(&addr);
1604 local_irq_restore(flags);
1605
1606 /* ftrace_dyn_arch_init places the return code in addr */
1607 if (addr)
1608 goto failed;
1609
1610 count = __stop_mcount_loc - __start_mcount_loc;
1611
1612 ret = ftrace_dyn_table_alloc(count);
1613 if (ret)
1614 goto failed;
1615
1616 last_ftrace_enabled = ftrace_enabled = 1;
1617
1618 ret = ftrace_convert_nops(__start_mcount_loc,
1619 __stop_mcount_loc);
1620
1621 return;
1622 failed:
1623 ftrace_disabled = 1;
1624}
1625#else /* CONFIG_FTRACE_MCOUNT_RECORD */
1626static int ftraced(void *ignore)
1627{
1628 unsigned long usecs;
1629
1630 while (!kthread_should_stop()) {
1631
1632 set_current_state(TASK_INTERRUPTIBLE);
1633
1634 /* check once a second */
1635 schedule_timeout(HZ);
1636
1637 if (unlikely(ftrace_disabled))
1638 continue;
1639
1640 mutex_lock(&ftrace_sysctl_lock);
1641 mutex_lock(&ftraced_lock);
1642 if (!ftraced_suspend && !ftraced_stop &&
1643 ftrace_update_code()) {
1644 usecs = nsecs_to_usecs(ftrace_update_time);
1645 if (ftrace_update_tot_cnt > 100000) {
1646 ftrace_update_tot_cnt = 0;
1647 pr_info("hm, dftrace overflow: %lu change%s"
1648 " (%lu total) in %lu usec%s\n",
1649 ftrace_update_cnt,
1650 ftrace_update_cnt != 1 ? "s" : "",
1651 ftrace_update_tot_cnt,
1652 usecs, usecs != 1 ? "s" : "");
1653 ftrace_disabled = 1;
1654 WARN_ON_ONCE(1);
1655 }
1656 }
1657 mutex_unlock(&ftraced_lock);
1658 mutex_unlock(&ftrace_sysctl_lock);
1659
1660 ftrace_shutdown_replenish();
1661 }
1662 __set_current_state(TASK_RUNNING);
1663 return 0;
1664}
1665
Ingo Molnare309b412008-05-12 21:20:51 +02001666static int __init ftrace_dynamic_init(void)
Steven Rostedt3d083392008-05-12 21:20:42 +02001667{
1668 struct task_struct *p;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001669 unsigned long addr;
Steven Rostedt3d083392008-05-12 21:20:42 +02001670 int ret;
1671
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001672 addr = (unsigned long)ftrace_record_ip;
Ingo Molnar9ff9cdb2008-05-12 21:20:50 +02001673
Rusty Russell784e2d72008-07-28 12:16:31 -05001674 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001675
1676 /* ftrace_dyn_arch_init places the return code in addr */
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001677 if (addr) {
1678 ret = (int)addr;
1679 goto failed;
1680 }
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001681
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001682 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
Steven Rostedt3d083392008-05-12 21:20:42 +02001683 if (ret)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001684 goto failed;
Steven Rostedt3d083392008-05-12 21:20:42 +02001685
1686 p = kthread_run(ftraced, NULL, "ftraced");
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001687 if (IS_ERR(p)) {
1688 ret = -1;
1689 goto failed;
1690 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001691
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001692 last_ftrace_enabled = ftrace_enabled = 1;
Steven Rostedte1c08bd2008-05-12 21:20:44 +02001693 ftraced_task = p;
Steven Rostedt3d083392008-05-12 21:20:42 +02001694
1695 return 0;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001696
1697 failed:
1698 ftrace_disabled = 1;
1699 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001700}
1701
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001702core_initcall(ftrace_dynamic_init);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001703#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1704
Steven Rostedt3d083392008-05-12 21:20:42 +02001705#else
Ingo Molnarc7aafc52008-05-12 21:20:45 +02001706# define ftrace_startup() do { } while (0)
1707# define ftrace_shutdown() do { } while (0)
1708# define ftrace_startup_sysctl() do { } while (0)
1709# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001710# define ftrace_force_shutdown() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02001711#endif /* CONFIG_DYNAMIC_FTRACE */
1712
1713/**
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001714 * ftrace_kill_atomic - kill ftrace from critical sections
1715 *
1716 * This function should be used by panic code. It stops ftrace
1717 * but in a not so nice way. If you need to simply kill ftrace
1718 * from a non-atomic section, use ftrace_kill.
1719 */
1720void ftrace_kill_atomic(void)
1721{
1722 ftrace_disabled = 1;
1723 ftrace_enabled = 0;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001724#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001725 ftraced_suspend = -1;
Ingo Molnarb2613e32008-07-11 16:44:27 +02001726#endif
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001727 clear_ftrace_function();
1728}
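
/*
 * A sketch of the intended kind of caller (hypothetical function): an
 * oops/panic path that must silence ftrace without sleeping or taking
 * any mutexes.
 */
#if 0
static void example_oops_path(void)
{
	if (oops_in_progress)
		ftrace_kill_atomic();	/* no locks taken, safe in atomic context */
	/* ... continue dumping state ... */
}
#endif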
1729
1730/**
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001731 * ftrace_kill - totally shutdown ftrace
1732 *
 1733 * This is a safety measure. If an anomaly is detected, calling this
 1734 * function keeps ftrace from making any further modifications or
 1735 * updates.
 1736 * It is meant to be used when something has gone wrong.
1737 */
1738void ftrace_kill(void)
1739{
1740 mutex_lock(&ftrace_sysctl_lock);
1741 ftrace_disabled = 1;
1742 ftrace_enabled = 0;
1743
1744 clear_ftrace_function();
1745 mutex_unlock(&ftrace_sysctl_lock);
1746
1747 /* Try to totally disable ftrace */
1748 ftrace_force_shutdown();
1749}
1750
1751/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001752 * register_ftrace_function - register a function for profiling
1753 * @ops - ops structure that holds the function for profiling.
1754 *
1755 * Register a function to be called by all functions in the
1756 * kernel.
1757 *
1758 * Note: @ops->func and all the functions it calls must be labeled
1759 * with "notrace", otherwise it will go into a
1760 * recursive loop.
1761 */
1762int register_ftrace_function(struct ftrace_ops *ops)
1763{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001764 int ret;
1765
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001766 if (unlikely(ftrace_disabled))
1767 return -1;
1768
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001769 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001770 ret = __register_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001771 ftrace_startup();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001772 mutex_unlock(&ftrace_sysctl_lock);
1773
1774 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001775}
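
/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file): note that the callback, and everything it calls, must be
 * notrace, exactly as the comment above warns.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip is its call site */
}

static struct ftrace_ops my_ops __read_mostly =
{
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif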
1776
1777/**
 1778 * unregister_ftrace_function - unregister a function for profiling.
1779 * @ops - ops structure that holds the function to unregister
1780 *
1781 * Unregister a function that was added to be called by ftrace profiling.
1782 */
1783int unregister_ftrace_function(struct ftrace_ops *ops)
1784{
1785 int ret;
1786
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001787 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001788 ret = __unregister_ftrace_function(ops);
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001789 ftrace_shutdown();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001790 mutex_unlock(&ftrace_sysctl_lock);
1791
1792 return ret;
1793}
1794
Ingo Molnare309b412008-05-12 21:20:51 +02001795int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001796ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001797 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001798 loff_t *ppos)
1799{
1800 int ret;
1801
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001802 if (unlikely(ftrace_disabled))
1803 return -ENODEV;
1804
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001805 mutex_lock(&ftrace_sysctl_lock);
1806
Steven Rostedt5072c592008-05-12 21:20:43 +02001807 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001808
1809 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1810 goto out;
1811
1812 last_ftrace_enabled = ftrace_enabled;
1813
1814 if (ftrace_enabled) {
1815
1816 ftrace_startup_sysctl();
1817
1818 /* we are starting ftrace again */
1819 if (ftrace_list != &ftrace_list_end) {
1820 if (ftrace_list->next == &ftrace_list_end)
1821 ftrace_trace_function = ftrace_list->func;
1822 else
1823 ftrace_trace_function = ftrace_list_func;
1824 }
1825
1826 } else {
1827 /* stopping ftrace calls (just send to ftrace_stub) */
1828 ftrace_trace_function = ftrace_stub;
1829
1830 ftrace_shutdown_sysctl();
1831 }
1832
1833 out:
1834 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001835 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001836}
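
/*
 * This handler backs the "ftrace_enabled" sysctl (registered from
 * kernel/sysctl.c), normally visible as /proc/sys/kernel/ftrace_enabled.
 * A userspace sketch that toggles function tracing (hypothetical helper):
 */
#if 0
#include <stdio.h>

static int set_ftrace_enabled(int on)
{
	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on);
	return fclose(f);
}
#endif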