/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

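/*
 * Usage sketch (illustrative only, not a caller in this file): both
 * macros evaluate to the condition's value, so a caller can shut
 * ftrace down on an anomaly and still branch on the result:
 *
 *	if (FTRACE_WARN_ON_ONCE(some_inconsistent_state))
 *		return -EINVAL;
 *
 * where some_inconsistent_state stands in for a hypothetical check.
 */
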
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);
#define ftrace_ops_list_func ftrace_ops_no_ops
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this
 * list are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

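/*
 * Usage sketch for the traversal macros above (illustrative; real
 * callers appear later in this file, e.g. ftrace_update_pid_func()):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 *
 * The do/while pairing is what lets the two macros expand into a
 * single well-formed loop statement.
 */
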
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before all CPUs stop calling
 * the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
		return -EINVAL;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	per_cpu_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
		if (per_cpu_ops_alloc(ops))
			return -ENOMEM;
	}

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

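/*
 * Rough arithmetic (an illustration, assuming 4K pages on a 64-bit
 * build with the function graph tracer enabled, where
 * sizeof(struct ftrace_profile) is 48 bytes): PROFILE_RECORDS_SIZE is
 * 4096 - 16 = 4080 bytes, so PROFILES_PER_PAGE works out to about 85
 * records per page. Exact values depend on architecture and config.
 */
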
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against the hit count */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
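
/*
 * Sanity check of the variance identity used above (illustrative):
 * for n = 2 samples x = {1, 3}, Sum x = 4 and Sum x^2 = 10, so
 * n * Sum x^2 - (Sum x)^2 = 2 * 10 - 16 = 4; dividing by
 * n * (n - 1) = 2 yields s^2 = 2, which matches the sample variance
 * of {1, 3} computed directly.
 */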

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
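
/*
 * Back-of-the-envelope (building on the ~85 records-per-page estimate
 * sketched earlier, which is an assumption, not a guarantee): the
 * 20000-function fallback gives DIV_ROUND_UP(20000, 85) ~= 236 pages,
 * i.e. a bit under 1 MB of profiling records per CPU. On dynamic
 * ftrace kernels this scales with ftrace_update_tot_cnt instead.
 */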

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = trace->depth;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
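
/*
 * Typical use from user space (a sketch; the path assumes tracefs is
 * mounted at /sys/kernel/tracing, it may also appear under
 * /sys/kernel/debug/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0   (per-CPU stats)
 *	echo 0 > /sys/kernel/tracing/function_profile_enabled
 *
 * The file itself is created by ftrace_profile_tracefs() below.
 */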

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something fails, we do not free the memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
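
/*
 * Usage sketch (illustrative only): a callback deciding whether an ip
 * is filtered would do something like
 *
 *	if (ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
 *		...ip is in the filter hash...
 *
 * relying on the same RCU protection that the _rcu_notrace iteration
 * above depends on.
 */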
1229
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001230static void __add_hash_entry(struct ftrace_hash *hash,
1231 struct ftrace_func_entry *entry)
1232{
1233 struct hlist_head *hhd;
1234 unsigned long key;
1235
1236 if (hash->size_bits)
1237 key = hash_long(entry->ip, hash->size_bits);
1238 else
1239 key = 0;
1240
1241 hhd = &hash->buckets[key];
1242 hlist_add_head(&entry->hlist, hhd);
1243 hash->count++;
1244}
1245
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001246static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1247{
1248 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001249
1250 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1251 if (!entry)
1252 return -ENOMEM;
1253
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001254 entry->ip = ip;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001255 __add_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001256
1257 return 0;
1258}
1259
1260static void
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001261free_hash_entry(struct ftrace_hash *hash,
1262 struct ftrace_func_entry *entry)
1263{
1264 hlist_del(&entry->hlist);
1265 kfree(entry);
1266 hash->count--;
1267}
1268
1269static void
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001270remove_hash_entry(struct ftrace_hash *hash,
1271 struct ftrace_func_entry *entry)
1272{
1273 hlist_del(&entry->hlist);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001274 hash->count--;
1275}
1276
1277static void ftrace_hash_clear(struct ftrace_hash *hash)
1278{
1279 struct hlist_head *hhd;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001280 struct hlist_node *tn;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001281 struct ftrace_func_entry *entry;
1282 int size = 1 << hash->size_bits;
1283 int i;
1284
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001285 if (!hash->count)
1286 return;
1287
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001288 for (i = 0; i < size; i++) {
1289 hhd = &hash->buckets[i];
Sasha Levinb67bfe02013-02-27 17:06:00 -08001290 hlist_for_each_entry_safe(entry, tn, hhd, hlist)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001291 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001292 }
1293 FTRACE_WARN_ON(hash->count);
1294}
1295
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001296static void free_ftrace_hash(struct ftrace_hash *hash)
1297{
1298 if (!hash || hash == EMPTY_HASH)
1299 return;
1300 ftrace_hash_clear(hash);
1301 kfree(hash->buckets);
1302 kfree(hash);
1303}
1304
Steven Rostedt07fd5512011-05-05 18:03:47 -04001305static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1306{
1307 struct ftrace_hash *hash;
1308
1309 hash = container_of(rcu, struct ftrace_hash, rcu);
1310 free_ftrace_hash(hash);
1311}
1312
1313static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1314{
1315 if (!hash || hash == EMPTY_HASH)
1316 return;
1317 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1318}
1319
Jiri Olsa5500fa52012-02-15 15:51:54 +01001320void ftrace_free_filter(struct ftrace_ops *ops)
1321{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09001322 ftrace_ops_init(ops);
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04001323 free_ftrace_hash(ops->func_hash->filter_hash);
1324 free_ftrace_hash(ops->func_hash->notrace_hash);
Jiri Olsa5500fa52012-02-15 15:51:54 +01001325}
1326
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001327static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1328{
1329 struct ftrace_hash *hash;
1330 int size;
1331
1332 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1333 if (!hash)
1334 return NULL;
1335
1336 size = 1 << size_bits;
Thomas Meyer47b0edc2011-11-29 22:08:00 +01001337 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001338
1339 if (!hash->buckets) {
1340 kfree(hash);
1341 return NULL;
1342 }
1343
1344 hash->size_bits = size_bits;
1345
1346 return hash;
1347}
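/*
 * Illustrative sketch, not part of the original file: how the hash
 * helpers above combine. Assumes the caller holds ftrace_lock; "ip"
 * is a hypothetical function address.
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (!hash)
 *		return -ENOMEM;
 *	if (add_hash_entry(hash, ip) < 0) {
 *		free_ftrace_hash(hash);
 *		return -ENOMEM;
 *	}
 *	WARN_ON(!ftrace_lookup_ip(hash, ip));
 *	free_ftrace_hash(hash);
 */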
1348
1349static struct ftrace_hash *
1350alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1351{
1352 struct ftrace_func_entry *entry;
1353 struct ftrace_hash *new_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001354 int size;
1355 int ret;
1356 int i;
1357
1358 new_hash = alloc_ftrace_hash(size_bits);
1359 if (!new_hash)
1360 return NULL;
1361
1362 /* Empty hash? */
Steven Rostedt06a51d92011-12-19 19:07:36 -05001363 if (ftrace_hash_empty(hash))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001364 return new_hash;
1365
1366 size = 1 << hash->size_bits;
1367 for (i = 0; i < size; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001368 hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001369 ret = add_hash_entry(new_hash, entry->ip);
1370 if (ret < 0)
1371 goto free_hash;
1372 }
1373 }
1374
1375 FTRACE_WARN_ON(new_hash->count != hash->count);
1376
1377 return new_hash;
1378
1379 free_hash:
1380 free_ftrace_hash(new_hash);
1381 return NULL;
1382}
1383
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001384static void
Steven Rostedt (Red Hat)84261912014-08-18 13:21:08 -04001385ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001386static void
Steven Rostedt (Red Hat)84261912014-08-18 13:21:08 -04001387ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001388
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001389static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1390 struct ftrace_hash *new_hash);
1391
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001392static int
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001393ftrace_hash_move(struct ftrace_ops *ops, int enable,
1394 struct ftrace_hash **dst, struct ftrace_hash *src)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001395{
1396 struct ftrace_func_entry *entry;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001397 struct hlist_node *tn;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001398 struct hlist_head *hhd;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001399 struct ftrace_hash *new_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001400 int size = src->count;
1401 int bits = 0;
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001402 int ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001403 int i;
1404
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001405 /* Reject setting notrace hash on IPMODIFY ftrace_ops */
1406 if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1407 return -EINVAL;
1408
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001409 /*
1410 * If the new source is empty, just free dst and assign it
1411 * the empty_hash.
1412 */
1413 if (!src->count) {
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001414 new_hash = EMPTY_HASH;
1415 goto update;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001416 }
1417
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001418 /*
1419 * Make the hash size about 1/2 the # found
1420 */
1421 for (size /= 2; size; size >>= 1)
1422 bits++;
1423
1424 /* Don't allocate too much */
1425 if (bits > FTRACE_HASH_MAX_BITS)
1426 bits = FTRACE_HASH_MAX_BITS;
1427
Steven Rostedt07fd5512011-05-05 18:03:47 -04001428 new_hash = alloc_ftrace_hash(bits);
1429 if (!new_hash)
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001430 return -ENOMEM;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001431
1432 size = 1 << src->size_bits;
1433 for (i = 0; i < size; i++) {
1434 hhd = &src->buckets[i];
Sasha Levinb67bfe02013-02-27 17:06:00 -08001435 hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001436 remove_hash_entry(src, entry);
Steven Rostedt07fd5512011-05-05 18:03:47 -04001437 __add_hash_entry(new_hash, entry);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001438 }
1439 }
1440
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001441update:
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001442 /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1443 if (enable) {
1444 /* IPMODIFY should be updated only when filter_hash updating */
1445 ret = ftrace_hash_ipmodify_update(ops, new_hash);
1446 if (ret < 0) {
1447 free_ftrace_hash(new_hash);
1448 return ret;
1449 }
1450 }
1451
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001452 /*
1453 * Remove the current set, update the hash and add
1454 * them back.
1455 */
Steven Rostedt (Red Hat)84261912014-08-18 13:21:08 -04001456 ftrace_hash_rec_disable_modify(ops, enable);
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001457
Steven Rostedt07fd5512011-05-05 18:03:47 -04001458 rcu_assign_pointer(*dst, new_hash);
Steven Rostedt07fd5512011-05-05 18:03:47 -04001459
Steven Rostedt (Red Hat)84261912014-08-18 13:21:08 -04001460 ftrace_hash_rec_enable_modify(ops, enable);
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001461
Masami Hiramatsu5c27c772014-06-17 11:04:42 +00001462 return 0;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001463}
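/*
 * Worked example of the sizing above, illustrative only: for
 * src->count == 100, size starts at 50 and the loop shifts it
 * through 50, 25, 12, 6, 3, 1, so bits ends up 6 and the new
 * hash gets 1 << 6 = 64 buckets (capped at FTRACE_HASH_MAX_BITS).
 */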
1464
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001465static bool hash_contains_ip(unsigned long ip,
1466 struct ftrace_ops_hash *hash)
1467{
1468 /*
1469 * The function record is a match if it exists in the filter
1470 * hash and not in the notrace hash. Note, an empty hash is
1471 * considered a match for the filter hash, but an empty
1472 * notrace hash is considered not in the notrace hash.
1473 */
1474 return (ftrace_hash_empty(hash->filter_hash) ||
1475 ftrace_lookup_ip(hash->filter_hash, ip)) &&
1476 (ftrace_hash_empty(hash->notrace_hash) ||
1477 !ftrace_lookup_ip(hash->notrace_hash, ip));
1478}
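/*
 * Illustrative truth table for the rule above (not in the original
 * source), for some address "ip":
 *
 *	filter_hash     notrace_hash    hash_contains_ip()
 *	empty           empty           true  (trace everything)
 *	has ip          empty           true
 *	lacks ip        empty           false
 *	empty           has ip          false (explicitly excluded)
 *	has ip          has ip          false (notrace wins)
 */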
1479
Steven Rostedt265c8312009-02-13 12:43:56 -05001480/*
Steven Rostedtb8489142011-05-04 09:27:52 -04001481 * Test the hashes for this ops to see if we want to call
1482 * the ops->func or not.
1483 *
1484 * It's a match if the ip is in the ops->filter_hash or
1485 * the filter_hash does not exist or is empty,
1486 * AND
1487 * the ip is not in the ops->notrace_hash.
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04001488 *
1489 * This needs to be called with preemption disabled as
1490 * the hashes are freed with call_rcu_sched().
Steven Rostedtb8489142011-05-04 09:27:52 -04001491 */
1492static int
Steven Rostedt (Red Hat)195a8af2013-07-23 22:06:15 -04001493ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04001494{
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001495 struct ftrace_ops_hash hash;
Steven Rostedtb8489142011-05-04 09:27:52 -04001496 int ret;
1497
Steven Rostedt (Red Hat)195a8af2013-07-23 22:06:15 -04001498#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1499 /*
1500 * There's a small race when adding ops that the ftrace handler
1501 * that wants regs, may be called without them. We can not
1502 * allow that handler to be called if regs is NULL.
1503 */
1504 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1505 return 0;
1506#endif
1507
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001508 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1509 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
Steven Rostedtb8489142011-05-04 09:27:52 -04001510
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001511 if (hash_contains_ip(ip, &hash))
Steven Rostedtb8489142011-05-04 09:27:52 -04001512 ret = 1;
1513 else
1514 ret = 0;
Steven Rostedtb8489142011-05-04 09:27:52 -04001515
1516 return ret;
1517}
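/*
 * Illustrative sketch, not part of the original file: roughly how
 * the list function consumes this test before invoking callbacks.
 * This is a simplification of the real __ftrace_ops_list_func().
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (ftrace_ops_test(op, ip, regs))
 *			op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */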
1518
1519/*
Steven Rostedt265c8312009-02-13 12:43:56 -05001520 * This is a double for loop. Do not use 'break' to break out of the loop,
1521 * you must use a goto.
1522 */
1523#define do_for_each_ftrace_rec(pg, rec) \
1524 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1525 int _____i; \
1526 for (_____i = 0; _____i < pg->index; _____i++) { \
1527 rec = &pg->records[_____i];
1528
1529#define while_for_each_ftrace_rec() \
1530 } \
1531 }
Abhishek Sagarecea6562008-06-21 23:47:53 +05301532
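/*
 * Illustrative usage of the macro pair above (not part of the
 * original file). Because the body is a nested loop, an early exit
 * must use a goto rather than break; "target_ip" is hypothetical.
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */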
Steven Rostedt5855fea2011-12-16 19:27:42 -05001533
1534static int ftrace_cmp_recs(const void *a, const void *b)
1535{
Steven Rostedta650e022012-04-25 13:48:13 -04001536 const struct dyn_ftrace *key = a;
1537 const struct dyn_ftrace *rec = b;
Steven Rostedt5855fea2011-12-16 19:27:42 -05001538
Steven Rostedta650e022012-04-25 13:48:13 -04001539 if (key->flags < rec->ip)
Steven Rostedt5855fea2011-12-16 19:27:42 -05001540 return -1;
Steven Rostedta650e022012-04-25 13:48:13 -04001541 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1542 return 1;
1543 return 0;
1544}
1545
Michael Ellerman04cf31a2016-03-24 22:04:01 +11001546/**
1547 * ftrace_location_range - return the first address of a traced location
1548 * if it touches the given ip range
1549 * @start: start of range to search.
1550 * @end: end of range to search (inclusive). @end points to the last byte
1551 * to check.
1552 *
1553 * Returns rec->ip if the related ftrace location is at least partly within
1554 * the given address range. That is, the first address of the instruction
1555 * that is either a NOP or call to the function tracer. It checks the ftrace
1556 * internal tables to determine if the address belongs or not.
1557 */
1558unsigned long ftrace_location_range(unsigned long start, unsigned long end)
Steven Rostedta650e022012-04-25 13:48:13 -04001559{
1560 struct ftrace_page *pg;
1561 struct dyn_ftrace *rec;
1562 struct dyn_ftrace key;
1563
1564 key.ip = start;
1565 key.flags = end; /* overload flags, as it is unsigned long */
1566
1567 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1568 if (end < pg->records[0].ip ||
1569 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1570 continue;
1571 rec = bsearch(&key, pg->records, pg->index,
1572 sizeof(struct dyn_ftrace),
1573 ftrace_cmp_recs);
1574 if (rec)
Steven Rostedtf0cf9732012-04-25 14:39:54 -04001575 return rec->ip;
Steven Rostedta650e022012-04-25 13:48:13 -04001576 }
1577
Steven Rostedt5855fea2011-12-16 19:27:42 -05001578 return 0;
1579}
1580
Steven Rostedtc88fd862011-08-16 09:53:39 -04001581/**
1582 * ftrace_location - return true if the ip given is a traced location
1583 * @ip: the instruction pointer to check
1584 *
Steven Rostedtf0cf9732012-04-25 14:39:54 -04001585 * Returns rec->ip if @ip given is a pointer to a ftrace location.
Steven Rostedtc88fd862011-08-16 09:53:39 -04001586 * That is, the instruction that is either a NOP or call to
1587 * the function tracer. It checks the ftrace internal tables to
1588 * determine if the address belongs or not.
1589 */
Steven Rostedtf0cf9732012-04-25 14:39:54 -04001590unsigned long ftrace_location(unsigned long ip)
Steven Rostedtc88fd862011-08-16 09:53:39 -04001591{
Steven Rostedta650e022012-04-25 13:48:13 -04001592 return ftrace_location_range(ip, ip);
1593}
Steven Rostedtc88fd862011-08-16 09:53:39 -04001594
Steven Rostedta650e022012-04-25 13:48:13 -04001595/**
1596 * ftrace_text_reserved - return true if range contains an ftrace location
1597 * @start: start of range to search
1598 * @end: end of range to search (inclusive). @end points to the last byte to check.
1599 *
1600 * Returns 1 if @start and @end contain a ftrace location.
1601 * That is, the instruction that is either a NOP or call to
1602 * the function tracer. It checks the ftrace internal tables to
1603 * determine if the address belongs or not.
1604 */
Sasha Levind88471c2013-01-09 18:09:20 -05001605int ftrace_text_reserved(const void *start, const void *end)
Steven Rostedta650e022012-04-25 13:48:13 -04001606{
Steven Rostedtf0cf9732012-04-25 14:39:54 -04001607 unsigned long ret;
1608
1609 ret = ftrace_location_range((unsigned long)start,
1610 (unsigned long)end);
1611
1612 return (int)!!ret;
Steven Rostedtc88fd862011-08-16 09:53:39 -04001613}
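/*
 * Illustrative sketch, not in the original source: how code that
 * patches kernel text might consult these helpers first. The names
 * "addr" and "len" are hypothetical.
 *
 *	if (ftrace_text_reserved((void *)addr, (void *)(addr + len - 1)))
 *		return -EBUSY;
 *
 * and, for a single instruction, ftrace_location(addr) returns the
 * record's ip (nonzero) when addr is itself a traced call site.
 */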
1614
Steven Rostedt (Red Hat)4fbb48c2014-04-30 22:35:48 -04001615/* Test if ops registered to this rec needs regs */
1616static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1617{
1618 struct ftrace_ops *ops;
1619 bool keep_regs = false;
1620
1621 for (ops = ftrace_ops_list;
1622 ops != &ftrace_list_end; ops = ops->next) {
1623 /* pass rec in as regs to have non-NULL val */
1624 if (ftrace_ops_test(ops, rec->ip, rec)) {
1625 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1626 keep_regs = true;
1627 break;
1628 }
1629 }
1630 }
1631
1632 return keep_regs;
1633}
1634
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001635static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
Steven Rostedted926f92011-05-03 13:25:24 -04001636 int filter_hash,
1637 bool inc)
1638{
1639 struct ftrace_hash *hash;
1640 struct ftrace_hash *other_hash;
1641 struct ftrace_page *pg;
1642 struct dyn_ftrace *rec;
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001643 bool update = false;
Steven Rostedted926f92011-05-03 13:25:24 -04001644 int count = 0;
1645 int all = 0;
1646
1647 /* Only update if the ops has been registered */
1648 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001649 return false;
Steven Rostedted926f92011-05-03 13:25:24 -04001650
1651 /*
1652 * In the filter_hash case:
1653 * If the count is zero, we update all records.
1654 * Otherwise we just update the items in the hash.
1655 *
1656 * In the notrace_hash case:
1657 * We enable the update in the hash.
1658 * As disabling notrace means enabling the tracing,
1659 * and enabling notrace means disabling, the inc variable
1660 * gets inverted.
1661 */
1662 if (filter_hash) {
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04001663 hash = ops->func_hash->filter_hash;
1664 other_hash = ops->func_hash->notrace_hash;
Steven Rostedt06a51d92011-12-19 19:07:36 -05001665 if (ftrace_hash_empty(hash))
Steven Rostedted926f92011-05-03 13:25:24 -04001666 all = 1;
1667 } else {
1668 inc = !inc;
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04001669 hash = ops->func_hash->notrace_hash;
1670 other_hash = ops->func_hash->filter_hash;
Steven Rostedted926f92011-05-03 13:25:24 -04001671 /*
1672 * If the notrace hash has no items,
1673 * then there's nothing to do.
1674 */
Steven Rostedt06a51d92011-12-19 19:07:36 -05001675 if (ftrace_hash_empty(hash))
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001676 return false;
Steven Rostedted926f92011-05-03 13:25:24 -04001677 }
1678
1679 do_for_each_ftrace_rec(pg, rec) {
1680 int in_other_hash = 0;
1681 int in_hash = 0;
1682 int match = 0;
1683
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05001684 if (rec->flags & FTRACE_FL_DISABLED)
1685 continue;
1686
Steven Rostedted926f92011-05-03 13:25:24 -04001687 if (all) {
1688 /*
1689 * Only the filter_hash affects all records.
1690 * Update if the record is not in the notrace hash.
1691 */
Steven Rostedtb8489142011-05-04 09:27:52 -04001692 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
Steven Rostedted926f92011-05-03 13:25:24 -04001693 match = 1;
1694 } else {
Steven Rostedt06a51d92011-12-19 19:07:36 -05001695 in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1696 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
Steven Rostedted926f92011-05-03 13:25:24 -04001697
1698 /*
Steven Rostedt (Red Hat)19eab4a2014-05-07 15:06:14 -04001699 * If filter_hash is set, we want to match all functions
1700 * that are in the hash but not in the other hash.
Steven Rostedted926f92011-05-03 13:25:24 -04001701 *
Steven Rostedt (Red Hat)19eab4a2014-05-07 15:06:14 -04001702 * If filter_hash is not set, then we are decrementing.
1703 * That means we match anything that is in the hash
1704 * and also in the other_hash. That is, we need to turn
1705 * off functions in the other hash because they are disabled
1706 * by this hash.
Steven Rostedted926f92011-05-03 13:25:24 -04001707 */
1708 if (filter_hash && in_hash && !in_other_hash)
1709 match = 1;
1710 else if (!filter_hash && in_hash &&
Steven Rostedt06a51d92011-12-19 19:07:36 -05001711 (in_other_hash || ftrace_hash_empty(other_hash)))
Steven Rostedted926f92011-05-03 13:25:24 -04001712 match = 1;
1713 }
1714 if (!match)
1715 continue;
1716
1717 if (inc) {
1718 rec->flags++;
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04001719 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001720 return false;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001721
1722 /*
1723 * If there's only a single callback registered to a
1724 * function, and the ops has a trampoline registered
1725 * for it, then we can call it directly.
1726 */
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001727 if (ftrace_rec_count(rec) == 1 && ops->trampoline)
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001728 rec->flags |= FTRACE_FL_TRAMP;
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001729 else
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001730 /*
1731 * If we are adding another function callback
1732 * to this function, and the previous had a
Steven Rostedt (Red Hat)bce0b6c2014-08-20 23:57:04 -04001733 * custom trampoline in use, then we need to go
1734 * back to the default trampoline.
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001735 */
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001736 rec->flags &= ~FTRACE_FL_TRAMP;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001737
Steven Rostedt08f6fba2012-04-30 16:20:23 -04001738 /*
1739 * If any ops wants regs saved for this function
1740 * then all ops will get saved regs.
1741 */
1742 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1743 rec->flags |= FTRACE_FL_REGS;
Steven Rostedted926f92011-05-03 13:25:24 -04001744 } else {
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04001745 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001746 return false;
Steven Rostedted926f92011-05-03 13:25:24 -04001747 rec->flags--;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001748
Steven Rostedt (Red Hat)4fbb48c2014-04-30 22:35:48 -04001749 /*
1750 * If the rec had REGS enabled and the ops that is
1751 * being removed had REGS set, then see if there is
1752 * still any ops for this record that wants regs.
1753 * If not, we can stop recording them.
1754 */
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04001755 if (ftrace_rec_count(rec) > 0 &&
Steven Rostedt (Red Hat)4fbb48c2014-04-30 22:35:48 -04001756 rec->flags & FTRACE_FL_REGS &&
1757 ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1758 if (!test_rec_ops_needs_regs(rec))
1759 rec->flags &= ~FTRACE_FL_REGS;
1760 }
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001761
1762 /*
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04001763 * If the rec had TRAMP enabled, then it needs to
1764 * be cleared. As TRAMP can only be enabled if
1765 * there is only a single ops attached to it.
1766 * In other words, always disable it on decrementing.
1767 * In the future, we may set it if rec count is
1768 * decremented to one, and the ops that is left
1769 * has a trampoline.
1770 */
1771 rec->flags &= ~FTRACE_FL_TRAMP;
1772
1773 /*
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04001774 * flags will be cleared in ftrace_check_record()
1775 * if rec count is zero.
1776 */
Steven Rostedted926f92011-05-03 13:25:24 -04001777 }
1778 count++;
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001779
1780 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1781 update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
1782
Steven Rostedted926f92011-05-03 13:25:24 -04001783 /* Shortcut, if we handled all records, we are done. */
1784 if (!all && count == hash->count)
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001785 return update;
Steven Rostedted926f92011-05-03 13:25:24 -04001786 } while_for_each_ftrace_rec();
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001787
1788 return update;
Steven Rostedted926f92011-05-03 13:25:24 -04001789}
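/*
 * Illustrative scenario, not in the original source: registering an
 * ops whose filter_hash holds a single ip walks the records once and
 * bumps that record's ref count from 0 to 1, so the function returns
 * true and the caller schedules an FTRACE_UPDATE_CALLS pass.
 * Unregistering repeats the walk with inc == false, dropping the
 * count so ftrace_check_record() can later clear the flags.
 */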
1790
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001791static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
Steven Rostedted926f92011-05-03 13:25:24 -04001792 int filter_hash)
1793{
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001794 return __ftrace_hash_rec_update(ops, filter_hash, 0);
Steven Rostedted926f92011-05-03 13:25:24 -04001795}
1796
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001797static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
Steven Rostedted926f92011-05-03 13:25:24 -04001798 int filter_hash)
1799{
Jiri Olsa84b6d3e2016-03-16 15:34:32 +01001800 return __ftrace_hash_rec_update(ops, filter_hash, 1);
Steven Rostedted926f92011-05-03 13:25:24 -04001801}
1802
Steven Rostedt (Red Hat)84261912014-08-18 13:21:08 -04001803static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1804 int filter_hash, int inc)
1805{
1806 struct ftrace_ops *op;
1807
1808 __ftrace_hash_rec_update(ops, filter_hash, inc);
1809
1810 if (ops->func_hash != &global_ops.local_hash)
1811 return;
1812
1813 /*
1814 * If the ops shares the global_ops hash, then we need to update
1815 * all ops that are enabled and use this hash.
1816 */
1817 do_for_each_ftrace_op(op, ftrace_ops_list) {
1818 /* Already done */
1819 if (op == ops)
1820 continue;
1821 if (op->func_hash == &global_ops.local_hash)
1822 __ftrace_hash_rec_update(op, filter_hash, inc);
1823 } while_for_each_ftrace_op(op);
1824}
1825
1826static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1827 int filter_hash)
1828{
1829 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1830}
1831
1832static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1833 int filter_hash)
1834{
1835 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1836}
1837
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001838/*
1839 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1840 * or no update is needed, -EBUSY if it detects a conflict of the flag
1841 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1842 * Note that old_hash and new_hash have the following meanings:
1843 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1844 * - If the hash is EMPTY_HASH, it hits nothing
1845 * - Anything else hits the recs which match the hash entries.
1846 */
1847static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1848 struct ftrace_hash *old_hash,
1849 struct ftrace_hash *new_hash)
1850{
1851 struct ftrace_page *pg;
1852 struct dyn_ftrace *rec, *end = NULL;
1853 int in_old, in_new;
1854
1855 /* Only update if the ops has been registered */
1856 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1857 return 0;
1858
1859 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1860 return 0;
1861
1862 /*
1863 * Since the IPMODIFY is a very address-sensitive action, we do not
1864 * allow ftrace_ops to set all functions to the new hash.
1865 */
1866 if (!new_hash || !old_hash)
1867 return -EINVAL;
1868
1869 /* Update rec->flags */
1870 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05001871
1872 if (rec->flags & FTRACE_FL_DISABLED)
1873 continue;
1874
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001875 /* We need to update only differences of filter_hash */
1876 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1877 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1878 if (in_old == in_new)
1879 continue;
1880
1881 if (in_new) {
1882 /* New entries must ensure no others are using it */
1883 if (rec->flags & FTRACE_FL_IPMODIFY)
1884 goto rollback;
1885 rec->flags |= FTRACE_FL_IPMODIFY;
1886 } else /* Removed entry */
1887 rec->flags &= ~FTRACE_FL_IPMODIFY;
1888 } while_for_each_ftrace_rec();
1889
1890 return 0;
1891
1892rollback:
1893 end = rec;
1894
1895 /* Roll back what we did above */
1896 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05001897
1898 if (rec->flags & FTRACE_FL_DISABLED)
1899 continue;
1900
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05001901 if (rec == end)
1902 goto err_out;
1903
1904 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1905 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1906 if (in_old == in_new)
1907 continue;
1908
1909 if (in_new)
1910 rec->flags &= ~FTRACE_FL_IPMODIFY;
1911 else
1912 rec->flags |= FTRACE_FL_IPMODIFY;
1913 } while_for_each_ftrace_rec();
1914
1915err_out:
1916 return -EBUSY;
1917}
1918
1919static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1920{
1921 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1922
1923 if (ftrace_hash_empty(hash))
1924 hash = NULL;
1925
1926 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1927}
1928
1929/* Disabling always succeeds */
1930static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1931{
1932 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1933
1934 if (ftrace_hash_empty(hash))
1935 hash = NULL;
1936
1937 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1938}
1939
1940static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1941 struct ftrace_hash *new_hash)
1942{
1943 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1944
1945 if (ftrace_hash_empty(old_hash))
1946 old_hash = NULL;
1947
1948 if (ftrace_hash_empty(new_hash))
1949 new_hash = NULL;
1950
1951 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1952}
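/*
 * Illustrative example, not in the original source: two IPMODIFY
 * users (live patching is one) cannot share a function. If ops A
 * already set FTRACE_FL_IPMODIFY on a record, enabling ops B with
 * the same ip in its filter hash takes the rollback path above and
 * the caller gets -EBUSY.
 */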
1953
Steven Rostedt (Red Hat)b05086c2015-11-25 14:13:11 -05001954static void print_ip_ins(const char *fmt, const unsigned char *p)
Steven Rostedt05736a42008-09-22 14:55:47 -07001955{
1956 int i;
1957
1958 printk(KERN_CONT "%s", fmt);
1959
1960 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1961 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1962}
1963
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04001964static struct ftrace_ops *
1965ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05001966static struct ftrace_ops *
1967ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04001968
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05001969enum ftrace_bug_type ftrace_bug_type;
Steven Rostedt (Red Hat)b05086c2015-11-25 14:13:11 -05001970const void *ftrace_expected;
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05001971
1972static void print_bug_type(void)
1973{
1974 switch (ftrace_bug_type) {
1975 case FTRACE_BUG_UNKNOWN:
1976 break;
1977 case FTRACE_BUG_INIT:
1978 pr_info("Initializing ftrace call sites\n");
1979 break;
1980 case FTRACE_BUG_NOP:
1981 pr_info("Setting ftrace call site to NOP\n");
1982 break;
1983 case FTRACE_BUG_CALL:
1984 pr_info("Setting ftrace call site to call ftrace function\n");
1985 break;
1986 case FTRACE_BUG_UPDATE:
1987 pr_info("Updating ftrace call site to call a different ftrace function\n");
1988 break;
1989 }
1990}
1991
Steven Rostedtc88fd862011-08-16 09:53:39 -04001992/**
1993 * ftrace_bug - report and shutdown function tracer
1994 * @failed: The failed type (EFAULT, EINVAL, EPERM)
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04001995 * @rec: The record that failed
Steven Rostedtc88fd862011-08-16 09:53:39 -04001996 *
1997 * The arch code that enables or disables the function tracing
1998 * can call ftrace_bug() when it has detected a problem in
1999 * modifying the code. @failed should be one of either:
2000 * EFAULT - if the problem happens on reading the @ip address
2001 * EINVAL - if what is read at @ip is not what was expected
2002 * EPERM - if the problem happens on writing to the @ip address
2003 */
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002004void ftrace_bug(int failed, struct dyn_ftrace *rec)
Steven Rostedtb17e8a32008-11-14 16:21:19 -08002005{
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002006 unsigned long ip = rec ? rec->ip : 0;
2007
Steven Rostedtb17e8a32008-11-14 16:21:19 -08002008 switch (failed) {
2009 case -EFAULT:
2010 FTRACE_WARN_ON_ONCE(1);
2011 pr_info("ftrace faulted on modifying ");
2012 print_ip_sym(ip);
2013 break;
2014 case -EINVAL:
2015 FTRACE_WARN_ON_ONCE(1);
2016 pr_info("ftrace failed to modify ");
2017 print_ip_sym(ip);
Steven Rostedt (Red Hat)b05086c2015-11-25 14:13:11 -05002018 print_ip_ins(" actual: ", (unsigned char *)ip);
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002019 pr_cont("\n");
Steven Rostedt (Red Hat)b05086c2015-11-25 14:13:11 -05002020 if (ftrace_expected) {
2021 print_ip_ins(" expected: ", ftrace_expected);
2022 pr_cont("\n");
2023 }
Steven Rostedtb17e8a32008-11-14 16:21:19 -08002024 break;
2025 case -EPERM:
2026 FTRACE_WARN_ON_ONCE(1);
2027 pr_info("ftrace faulted on writing ");
2028 print_ip_sym(ip);
2029 break;
2030 default:
2031 FTRACE_WARN_ON_ONCE(1);
2032 pr_info("ftrace faulted on unknown error ");
2033 print_ip_sym(ip);
2034 }
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002035 print_bug_type();
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002036 if (rec) {
2037 struct ftrace_ops *ops = NULL;
2038
2039 pr_info("ftrace record flags: %lx\n", rec->flags);
2040 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2041 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2042 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2043 ops = ftrace_find_tramp_ops_any(rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05002044 if (ops) {
2045 do {
2046 pr_cont("\ttramp: %pS (%pS)",
2047 (void *)ops->trampoline,
2048 (void *)ops->func);
2049 ops = ftrace_find_tramp_ops_next(rec, ops);
2050 } while (ops);
2051 } else
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002052 pr_cont("\ttramp: ERROR!");
2053
2054 }
2055 ip = ftrace_get_addr_curr(rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05002056 pr_cont("\n expected tramp: %lx\n", ip);
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002057 }
Steven Rostedtb17e8a32008-11-14 16:21:19 -08002058}
2059
Steven Rostedtc88fd862011-08-16 09:53:39 -04002060static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
Steven Rostedt5072c592008-05-12 21:20:43 +02002061{
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08002062 unsigned long flag = 0UL;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01002063
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002064 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2065
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002066 if (rec->flags & FTRACE_FL_DISABLED)
2067 return FTRACE_UPDATE_IGNORE;
2068
Steven Rostedt982c3502008-11-15 16:31:41 -05002069 /*
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01002070 * If we are updating calls:
Steven Rostedt982c3502008-11-15 16:31:41 -05002071 *
Steven Rostedted926f92011-05-03 13:25:24 -04002072 * If the record has a ref count, then we need to enable it
2073 * because someone is using it.
Steven Rostedt982c3502008-11-15 16:31:41 -05002074 *
Steven Rostedted926f92011-05-03 13:25:24 -04002075 * Otherwise we make sure its disabled.
2076 *
Jiri Olsa30fb6aa2011-12-05 18:22:48 +01002077 * If we are disabling calls, then disable all records that
Steven Rostedted926f92011-05-03 13:25:24 -04002078 * are enabled.
Steven Rostedt982c3502008-11-15 16:31:41 -05002079 */
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04002080 if (enable && ftrace_rec_count(rec))
Steven Rostedted926f92011-05-03 13:25:24 -04002081 flag = FTRACE_FL_ENABLED;
Steven Rostedt5072c592008-05-12 21:20:43 +02002082
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002083 /*
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002084 * If enabling and the REGS flag does not match the REGS_EN, or
2085 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2086 * this record. Set flags to fail the compare against ENABLED.
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002087 */
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002088 if (flag) {
2089 if (!(rec->flags & FTRACE_FL_REGS) !=
2090 !(rec->flags & FTRACE_FL_REGS_EN))
2091 flag |= FTRACE_FL_REGS;
2092
2093 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2094 !(rec->flags & FTRACE_FL_TRAMP_EN))
2095 flag |= FTRACE_FL_TRAMP;
2096 }
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002097
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08002098 /* If the state of this record hasn't changed, then do nothing */
2099 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
Steven Rostedtc88fd862011-08-16 09:53:39 -04002100 return FTRACE_UPDATE_IGNORE;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08002101
2102 if (flag) {
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002103 /* Save off if rec is being enabled (for return value) */
2104 flag ^= rec->flags & FTRACE_FL_ENABLED;
2105
2106 if (update) {
Steven Rostedtc88fd862011-08-16 09:53:39 -04002107 rec->flags |= FTRACE_FL_ENABLED;
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002108 if (flag & FTRACE_FL_REGS) {
2109 if (rec->flags & FTRACE_FL_REGS)
2110 rec->flags |= FTRACE_FL_REGS_EN;
2111 else
2112 rec->flags &= ~FTRACE_FL_REGS_EN;
2113 }
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002114 if (flag & FTRACE_FL_TRAMP) {
2115 if (rec->flags & FTRACE_FL_TRAMP)
2116 rec->flags |= FTRACE_FL_TRAMP_EN;
2117 else
2118 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2119 }
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002120 }
2121
2122 /*
2123 * If this record is being updated from a nop, then
2124 * return UPDATE_MAKE_CALL.
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002125 * Otherwise,
2126 * return UPDATE_MODIFY_CALL to tell the caller to convert
Steven Rostedt (Red Hat)f1b2f2b2014-05-07 16:09:49 -04002127 * from a save-regs function to a non-save-regs function, or
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002128 * vice versa, or from a trampoline call.
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002129 */
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002130 if (flag & FTRACE_FL_ENABLED) {
2131 ftrace_bug_type = FTRACE_BUG_CALL;
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002132 return FTRACE_UPDATE_MAKE_CALL;
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002133 }
Steven Rostedt (Red Hat)f1b2f2b2014-05-07 16:09:49 -04002134
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002135 ftrace_bug_type = FTRACE_BUG_UPDATE;
Steven Rostedt (Red Hat)f1b2f2b2014-05-07 16:09:49 -04002136 return FTRACE_UPDATE_MODIFY_CALL;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08002137 }
2138
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002139 if (update) {
2140 /* If there's no more users, clear all flags */
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04002141 if (!ftrace_rec_count(rec))
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002142 rec->flags = 0;
2143 else
Steven Rostedt (Red Hat)b24d4432015-03-04 23:10:28 -05002144 /*
2145 * Just disable the record, but keep the ops TRAMP
2146 * and REGS states. The _EN flags must be disabled though.
2147 */
2148 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2149 FTRACE_FL_REGS_EN);
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002150 }
Steven Rostedtc88fd862011-08-16 09:53:39 -04002151
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002152 ftrace_bug_type = FTRACE_BUG_NOP;
Steven Rostedtc88fd862011-08-16 09:53:39 -04002153 return FTRACE_UPDATE_MAKE_NOP;
2154}
2155
2156/**
2157 * ftrace_update_record - set a record that now is tracing or not
2158 * @rec: the record to update
2159 * @enable: set to 1 if the record is tracing, zero to force disable
2160 *
2161 * The records that represent all functions that can be traced need
2162 * to be updated when tracing has been enabled.
2163 */
2164int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2165{
2166 return ftrace_check_record(rec, enable, 1);
2167}
2168
2169/**
2170 * ftrace_test_record - check if the record has been enabled or not
2171 * @rec: the record to test
2172 * @enable: set to 1 to check if enabled, 0 if it is disabled
2173 *
2174 * The arch code may need to test if a record is already set to
2175 * tracing to determine how to modify the function code that it
2176 * represents.
2177 */
2178int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2179{
2180 return ftrace_check_record(rec, enable, 0);
2181}
2182
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002183static struct ftrace_ops *
Steven Rostedt (Red Hat)5fecaa02014-07-24 16:00:31 -04002184ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2185{
2186 struct ftrace_ops *op;
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002187 unsigned long ip = rec->ip;
Steven Rostedt (Red Hat)5fecaa02014-07-24 16:00:31 -04002188
2189 do_for_each_ftrace_op(op, ftrace_ops_list) {
2190
2191 if (!op->trampoline)
2192 continue;
2193
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002194 if (hash_contains_ip(ip, op->func_hash))
Steven Rostedt (Red Hat)5fecaa02014-07-24 16:00:31 -04002195 return op;
2196 } while_for_each_ftrace_op(op);
2197
2198 return NULL;
2199}
2200
2201static struct ftrace_ops *
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05002202ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2203 struct ftrace_ops *op)
2204{
2205 unsigned long ip = rec->ip;
2206
2207 while_for_each_ftrace_op(op) {
2208
2209 if (!op->trampoline)
2210 continue;
2211
2212 if (hash_contains_ip(ip, op->func_hash))
2213 return op;
2214 }
2215
2216 return NULL;
2217}
2218
2219static struct ftrace_ops *
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002220ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2221{
2222 struct ftrace_ops *op;
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002223 unsigned long ip = rec->ip;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002224
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002225 /*
2226 * Need to check removed ops first.
2227 * If they are being removed, and this rec has a tramp,
2228 * and this rec is in the ops list, then it would be the
2229 * one with the tramp.
2230 */
2231 if (removed_ops) {
2232 if (hash_contains_ip(ip, &removed_ops->old_hash))
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002233 return removed_ops;
2234 }
2235
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002236 /*
2237 * Need to find the current trampoline for a rec.
2238 * Now, a trampoline is only attached to a rec if there
2239 * was a single 'ops' attached to it. But this can be called
2240 * when we are adding another op to the rec or removing the
2241 * current one. Thus, if the op is being added, we can
2242 * ignore it because it hasn't attached itself to the rec
Steven Rostedt (Red Hat)4fc40902014-10-24 14:48:35 -04002243 * yet.
2244 *
2245 * If an ops is being modified (hooking to different functions)
2246 * then we don't care about the new functions that are being
2247 * added, just the old ones (that are probably being removed).
2248 *
2249 * If we are adding an ops to a function that already is using
2250 * a trampoline, it needs to be removed (trampolines are only
2251 * for single ops connected), then an ops that is not being
2252 * modified also needs to be checked.
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002253 */
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002254 do_for_each_ftrace_op(op, ftrace_ops_list) {
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002255
2256 if (!op->trampoline)
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002257 continue;
2258
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002259 /*
2260 * If the ops is being added, it hasn't gotten to
2261 * the point to be removed from this tree yet.
2262 */
2263 if (op->flags & FTRACE_OPS_FL_ADDING)
2264 continue;
2265
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002266
Steven Rostedt (Red Hat)4fc40902014-10-24 14:48:35 -04002267 /*
2268 * If the ops is being modified and is in the old
2269 * hash, then it is probably being removed from this
2270 * function.
2271 */
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002272 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2273 hash_contains_ip(ip, &op->old_hash))
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002274 return op;
Steven Rostedt (Red Hat)4fc40902014-10-24 14:48:35 -04002275 /*
2276 * If the ops is not being added or modified, and it's
2277 * in its normal filter hash, then this must be the one
2278 * we want!
2279 */
2280 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2281 hash_contains_ip(ip, op->func_hash))
2282 return op;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002283
2284 } while_for_each_ftrace_op(op);
2285
2286 return NULL;
2287}
2288
2289static struct ftrace_ops *
2290ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2291{
2292 struct ftrace_ops *op;
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002293 unsigned long ip = rec->ip;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002294
2295 do_for_each_ftrace_op(op, ftrace_ops_list) {
2296 /* pass rec in as regs to have non-NULL val */
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002297 if (hash_contains_ip(ip, op->func_hash))
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002298 return op;
2299 } while_for_each_ftrace_op(op);
2300
2301 return NULL;
2302}
2303
Steven Rostedt (Red Hat)7413af12014-05-06 21:34:14 -04002304/**
2305 * ftrace_get_addr_new - Get the call address to set to
2306 * @rec: The ftrace record descriptor
2307 *
2308 * If the record has the FTRACE_FL_REGS set, that means that it
2309 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2310 * is not set, then it wants to convert to the normal callback.
2311 *
2312 * Returns the address of the trampoline to set to
2313 */
2314unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2315{
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002316 struct ftrace_ops *ops;
2317
2318 /* Trampolines take precedence over regs */
2319 if (rec->flags & FTRACE_FL_TRAMP) {
2320 ops = ftrace_find_tramp_ops_new(rec);
2321 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
Steven Rostedt (Red Hat)bce0b6c2014-08-20 23:57:04 -04002322 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2323 (void *)rec->ip, (void *)rec->ip, rec->flags);
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002324 /* Ftrace is shutting down, return anything */
2325 return (unsigned long)FTRACE_ADDR;
2326 }
2327 return ops->trampoline;
2328 }
2329
Steven Rostedt (Red Hat)7413af12014-05-06 21:34:14 -04002330 if (rec->flags & FTRACE_FL_REGS)
2331 return (unsigned long)FTRACE_REGS_ADDR;
2332 else
2333 return (unsigned long)FTRACE_ADDR;
2334}
2335
2336/**
2337 * ftrace_get_addr_curr - Get the call address that is already there
2338 * @rec: The ftrace record descriptor
2339 *
2340 * The FTRACE_FL_REGS_EN is set when the record already points to
2341 * a function that saves all the regs. Basically the '_EN' version
2342 * represents the current state of the function.
2343 *
2344 * Returns the address of the trampoline that is currently being called
2345 */
2346unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2347{
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002348 struct ftrace_ops *ops;
2349
2350 /* Trampolines take precedence over regs */
2351 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2352 ops = ftrace_find_tramp_ops_curr(rec);
2353 if (FTRACE_WARN_ON(!ops)) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07002354 pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2355 (void *)rec->ip, (void *)rec->ip);
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002356 /* Ftrace is shutting down, return anything */
2357 return (unsigned long)FTRACE_ADDR;
2358 }
2359 return ops->trampoline;
2360 }
2361
Steven Rostedt (Red Hat)7413af12014-05-06 21:34:14 -04002362 if (rec->flags & FTRACE_FL_REGS_EN)
2363 return (unsigned long)FTRACE_REGS_ADDR;
2364 else
2365 return (unsigned long)FTRACE_ADDR;
2366}
2367
Steven Rostedtc88fd862011-08-16 09:53:39 -04002368static int
2369__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2370{
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002371 unsigned long ftrace_old_addr;
Steven Rostedtc88fd862011-08-16 09:53:39 -04002372 unsigned long ftrace_addr;
2373 int ret;
2374
Steven Rostedt (Red Hat)7c0868e2014-05-08 07:01:21 -04002375 ftrace_addr = ftrace_get_addr_new(rec);
Steven Rostedtc88fd862011-08-16 09:53:39 -04002376
Steven Rostedt (Red Hat)7c0868e2014-05-08 07:01:21 -04002377 /* This needs to be done before we call ftrace_update_record */
2378 ftrace_old_addr = ftrace_get_addr_curr(rec);
2379
2380 ret = ftrace_update_record(rec, enable);
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002381
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002382 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2383
Steven Rostedtc88fd862011-08-16 09:53:39 -04002384 switch (ret) {
2385 case FTRACE_UPDATE_IGNORE:
2386 return 0;
2387
2388 case FTRACE_UPDATE_MAKE_CALL:
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002389 ftrace_bug_type = FTRACE_BUG_CALL;
Steven Rostedtc88fd862011-08-16 09:53:39 -04002390 return ftrace_make_call(rec, ftrace_addr);
2391
2392 case FTRACE_UPDATE_MAKE_NOP:
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002393 ftrace_bug_type = FTRACE_BUG_NOP;
Steven Rostedt (Red Hat)39b55522014-08-17 20:59:10 -04002394 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002395
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002396 case FTRACE_UPDATE_MODIFY_CALL:
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002397 ftrace_bug_type = FTRACE_BUG_UPDATE;
Steven Rostedt08f6fba2012-04-30 16:20:23 -04002398 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
Steven Rostedtc88fd862011-08-16 09:53:39 -04002399 }
2400
2401 return -1; /* unknown ftrace bug */
Steven Rostedt5072c592008-05-12 21:20:43 +02002402}
2403
Steven Rostedte4f5d542012-04-27 09:13:18 -04002404void __weak ftrace_replace_code(int enable)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002405{
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002406 struct dyn_ftrace *rec;
2407 struct ftrace_page *pg;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002408 int failed;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002409
Steven Rostedt45a4a232011-04-21 23:16:46 -04002410 if (unlikely(ftrace_disabled))
2411 return;
2412
Steven Rostedt265c8312009-02-13 12:43:56 -05002413 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05002414
2415 if (rec->flags & FTRACE_FL_DISABLED)
2416 continue;
2417
Steven Rostedte4f5d542012-04-27 09:13:18 -04002418 failed = __ftrace_replace_code(rec, enable);
Zhaoleifa9d13c2009-03-13 17:16:34 +08002419 if (failed) {
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002420 ftrace_bug(failed, rec);
Steven Rostedt3279ba32009-10-07 16:57:56 -04002421 /* Stop processing */
2422 return;
Steven Rostedt265c8312009-02-13 12:43:56 -05002423 }
2424 } while_for_each_ftrace_rec();
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002425}
2426
Steven Rostedtc88fd862011-08-16 09:53:39 -04002427struct ftrace_rec_iter {
2428 struct ftrace_page *pg;
2429 int index;
2430};
2431
2432/**
2433 * ftrace_rec_iter_start - start up iterating over traced functions
2434 *
2435 * Returns an iterator handle that is used to iterate over all
2436 * the records that represent address locations where functions
2437 * are traced.
2438 *
2439 * May return NULL if no records are available.
2440 */
2441struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2442{
2443 /*
2444 * We only use a single iterator.
2445 * Protected by the ftrace_lock mutex.
2446 */
2447 static struct ftrace_rec_iter ftrace_rec_iter;
2448 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2449
2450 iter->pg = ftrace_pages_start;
2451 iter->index = 0;
2452
2453 /* Could have empty pages */
2454 while (iter->pg && !iter->pg->index)
2455 iter->pg = iter->pg->next;
2456
2457 if (!iter->pg)
2458 return NULL;
2459
2460 return iter;
2461}
2462
2463/**
2464 * ftrace_rec_iter_next - get the next record to process.
2465 * @iter: The handle to the iterator.
2466 *
2467 * Returns the next iterator after the given iterator @iter.
2468 */
2469struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2470{
2471 iter->index++;
2472
2473 if (iter->index >= iter->pg->index) {
2474 iter->pg = iter->pg->next;
2475 iter->index = 0;
2476
2477 /* Could have empty pages */
2478 while (iter->pg && !iter->pg->index)
2479 iter->pg = iter->pg->next;
2480 }
2481
2482 if (!iter->pg)
2483 return NULL;
2484
2485 return iter;
2486}
2487
2488/**
2489 * ftrace_rec_iter_record - get the record at the iterator location
2490 * @iter: The current iterator location
2491 *
2492 * Returns the record that the current @iter is at.
2493 */
2494struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2495{
2496 return &iter->pg->records[iter->index];
2497}
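/*
 * Illustrative walk of the iterator API above, not part of the
 * original file; this is the shape an arch ftrace_replace_code()
 * implementation might take.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect or patch rec->ip here ...
 *	}
 */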
2498
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05302499static int
Steven Rostedt31e88902008-11-14 16:21:19 -08002500ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002501{
Steven Rostedt593eb8a2008-10-23 09:32:59 -04002502 int ret;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002503
Steven Rostedt45a4a232011-04-21 23:16:46 -04002504 if (unlikely(ftrace_disabled))
2505 return 0;
2506
Shaohua Li25aac9d2009-01-09 11:29:40 +08002507 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
Steven Rostedt593eb8a2008-10-23 09:32:59 -04002508 if (ret) {
Steven Rostedt (Red Hat)02a392a2015-11-25 12:50:47 -05002509 ftrace_bug_type = FTRACE_BUG_INIT;
Steven Rostedt (Red Hat)4fd32792014-10-24 17:56:04 -04002510 ftrace_bug(ret, rec);
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05302511 return 0;
Steven Rostedt37ad5082008-05-12 21:20:48 +02002512 }
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05302513 return 1;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002514}
2515
Steven Rostedt000ab692009-02-17 13:35:06 -05002516/*
2517 * archs can override this function if they must do something
2518 * before the modifying code is performed.
2519 */
2520int __weak ftrace_arch_code_modify_prepare(void)
2521{
2522 return 0;
2523}
2524
2525/*
2526 * archs can override this function if they must do something
2527 * after the modifying code is performed.
2528 */
2529int __weak ftrace_arch_code_modify_post_process(void)
2530{
2531 return 0;
2532}
2533
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002534void ftrace_modify_all_code(int command)
2535{
Steven Rostedt (Red Hat)59338f72013-08-31 01:04:07 -04002536 int update = command & FTRACE_UPDATE_TRACE_FUNC;
Petr Mladekcd210672014-02-24 17:12:21 +01002537 int err = 0;
Steven Rostedt (Red Hat)59338f72013-08-31 01:04:07 -04002538
2539 /*
2540 * If the ftrace_caller calls a ftrace_ops func directly,
2541 * we need to make sure that it only traces functions it
2542 * expects to trace. When doing the switch of functions,
2543 * we need to update to the ftrace_ops_list_func first
2544 * before the transition between old and new calls are set,
2545 * as the ftrace_ops_list_func will check the ops hashes
2546 * to make sure the ops are having the right functions
2547 * traced.
2548 */
Petr Mladekcd210672014-02-24 17:12:21 +01002549 if (update) {
2550 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2551 if (FTRACE_WARN_ON(err))
2552 return;
2553 }
Steven Rostedt (Red Hat)59338f72013-08-31 01:04:07 -04002554
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002555 if (command & FTRACE_UPDATE_CALLS)
2556 ftrace_replace_code(1);
2557 else if (command & FTRACE_DISABLE_CALLS)
2558 ftrace_replace_code(0);
2559
Steven Rostedt (Red Hat)405e1d82013-11-08 14:17:30 -05002560 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2561 function_trace_op = set_function_trace_op;
2562 smp_wmb();
2563 /* If irqs are disabled, we are in stop machine */
2564 if (!irqs_disabled())
2565 smp_call_function(ftrace_sync_ipi, NULL, 1);
Petr Mladekcd210672014-02-24 17:12:21 +01002566 err = ftrace_update_ftrace_func(ftrace_trace_function);
2567 if (FTRACE_WARN_ON(err))
2568 return;
Steven Rostedt (Red Hat)405e1d82013-11-08 14:17:30 -05002569 }
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002570
2571 if (command & FTRACE_START_FUNC_RET)
Petr Mladekcd210672014-02-24 17:12:21 +01002572 err = ftrace_enable_ftrace_graph_caller();
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002573 else if (command & FTRACE_STOP_FUNC_RET)
Petr Mladekcd210672014-02-24 17:12:21 +01002574 err = ftrace_disable_ftrace_graph_caller();
2575 FTRACE_WARN_ON(err);
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002576}
2577
Ingo Molnare309b412008-05-12 21:20:51 +02002578static int __ftrace_modify_code(void *data)
Steven Rostedt3d083392008-05-12 21:20:42 +02002579{
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002580 int *command = data;
2581
Steven Rostedt8ed3e2c2012-04-26 14:59:43 -04002582 ftrace_modify_all_code(*command);
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05002583
Steven Rostedtc88fd862011-08-16 09:53:39 -04002584 return 0;
2585}
2586
2587/**
2588 * ftrace_run_stop_machine - go back to the stop machine method
2589 * @command: The command to tell ftrace what to do
2590 *
2591 * If an arch needs to fall back to the stop machine method, then
2592 * it can call this function.
2593 */
2594void ftrace_run_stop_machine(int command)
2595{
2596 stop_machine(__ftrace_modify_code, &command, NULL);
2597}
2598
2599/**
2600 * arch_ftrace_update_code - modify the code to trace or not trace
2601 * @command: The command that needs to be done
2602 *
2603 * Archs can override this function if they do not need to
2604 * run stop_machine() to modify code.
2605 */
2606void __weak arch_ftrace_update_code(int command)
2607{
2608 ftrace_run_stop_machine(command);
2609}
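/*
 * Illustrative sketch, not part of the original file: an arch whose
 * text patching is safe against concurrent execution can override
 * the weak function above and avoid stop_machine() entirely:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */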
2610
2611static void ftrace_run_update_code(int command)
2612{
2613 int ret;
2614
2615 ret = ftrace_arch_code_modify_prepare();
2616 FTRACE_WARN_ON(ret);
2617 if (ret)
2618 return;
Steven Rostedtc88fd862011-08-16 09:53:39 -04002619
2620 /*
2621 * By default we use stop_machine() to modify the code.
2622 * But archs can do whatever they want as long as it
2623 * is safe. The stop_machine() is the safest, but also
2624 * produces the most overhead.
2625 */
2626 arch_ftrace_update_code(command);
2627
Steven Rostedt000ab692009-02-17 13:35:06 -05002628 ret = ftrace_arch_code_modify_post_process();
2629 FTRACE_WARN_ON(ret);
Steven Rostedt3d083392008-05-12 21:20:42 +02002630}
2631
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04002632static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05002633 struct ftrace_ops_hash *old_hash)
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002634{
2635 ops->flags |= FTRACE_OPS_FL_MODIFYING;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05002636 ops->old_hash.filter_hash = old_hash->filter_hash;
2637 ops->old_hash.notrace_hash = old_hash->notrace_hash;
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002638 ftrace_run_update_code(command);
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04002639 ops->old_hash.filter_hash = NULL;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05002640 ops->old_hash.notrace_hash = NULL;
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002641 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2642}
2643
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002644static ftrace_func_t saved_ftrace_func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002645static int ftrace_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002646
Steven Rostedt (Red Hat)12cce592014-07-03 15:48:16 -04002647void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2648{
2649}
2650
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05002651static void per_cpu_ops_free(struct ftrace_ops *ops)
Jiri Slabydb0fbad2014-03-10 21:42:11 +01002652{
2653 free_percpu(ops->disabled);
2654}
2655
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002656static void ftrace_startup_enable(int command)
2657{
2658 if (saved_ftrace_func != ftrace_trace_function) {
2659 saved_ftrace_func = ftrace_trace_function;
2660 command |= FTRACE_UPDATE_TRACE_FUNC;
2661 }
2662
2663 if (!command || !ftrace_enabled)
2664 return;
2665
2666 ftrace_run_update_code(command);
2667}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002668
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002669static void ftrace_startup_all(int command)
2670{
2671 update_all_ops = true;
2672 ftrace_startup_enable(command);
2673 update_all_ops = false;
2674}
2675
Steven Rostedta1cd6172011-05-23 15:24:25 -04002676static int ftrace_startup(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02002677{
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002678 int ret;
Steven Rostedtb8489142011-05-04 09:27:52 -04002679
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002680 if (unlikely(ftrace_disabled))
Steven Rostedta1cd6172011-05-23 15:24:25 -04002681 return -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002682
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002683 ret = __register_ftrace_function(ops);
2684 if (ret)
2685 return ret;
2686
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002687 ftrace_start_up++;
Steven Rostedt3d083392008-05-12 21:20:42 +02002688
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002689 /*
2690 * Note that ftrace probes use this to start up
2691 * and modify functions it will probe. But we still
2692 * set the ADDING flag for modification, as probes
2693 * do not have trampolines. If they add them in the
2694 * future, then the probes will need to distinguish
2695 * between adding and updating probes.
2696 */
2697 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
Steven Rostedt (Red Hat)66209a52014-05-06 21:57:49 -04002698
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05002699 ret = ftrace_hash_ipmodify_enable(ops);
2700 if (ret < 0) {
2701 /* Rollback registration process */
2702 __unregister_ftrace_function(ops);
2703 ftrace_start_up--;
2704 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2705 return ret;
2706 }
2707
Jiri Olsa7f50d062016-03-16 15:34:33 +01002708 if (ftrace_hash_rec_enable(ops, 1))
2709 command |= FTRACE_UPDATE_CALLS;
Steven Rostedted926f92011-05-03 13:25:24 -04002710
Steven Rostedtdf4fc312008-11-26 00:16:23 -05002711 ftrace_startup_enable(command);
Steven Rostedta1cd6172011-05-23 15:24:25 -04002712
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002713 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2714
Steven Rostedta1cd6172011-05-23 15:24:25 -04002715 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02002716}
2717
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002718static int ftrace_shutdown(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02002719{
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002720 int ret;
Steven Rostedtb8489142011-05-04 09:27:52 -04002721
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002722 if (unlikely(ftrace_disabled))
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002723 return -ENODEV;
2724
2725 ret = __unregister_ftrace_function(ops);
2726 if (ret)
2727 return ret;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002728
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002729 ftrace_start_up--;
Frederic Weisbecker9ea1a152009-06-20 06:52:21 +02002730 /*
2731 * Just warn in case of imbalance; no need to kill ftrace, it's not
2732 * critical, but the ftrace_call callers may never be nopped again after
2733 * further ftrace uses.
2734 */
2735 WARN_ON_ONCE(ftrace_start_up < 0);
2736
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05002737 /* Disabling ipmodify never fails */
2738 ftrace_hash_ipmodify_disable(ops);
Jiri Olsa7f50d062016-03-16 15:34:33 +01002739
2740 if (ftrace_hash_rec_disable(ops, 1))
2741 command |= FTRACE_UPDATE_CALLS;
Steven Rostedtb8489142011-05-04 09:27:52 -04002742
Namhyung Kima737e6d2014-06-12 23:56:12 +09002743 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04002744
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002745 if (saved_ftrace_func != ftrace_trace_function) {
2746 saved_ftrace_func = ftrace_trace_function;
2747 command |= FTRACE_UPDATE_TRACE_FUNC;
2748 }
2749
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002750 if (!command || !ftrace_enabled) {
2751 /*
Steven Rostedt (VMware)100553e2017-09-01 12:18:28 -04002752 * If these are dynamic or per_cpu ops, they still
2753 * need their data freed. Since function tracing is
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002754 * not currently active, we can just free them
2755 * without synchronizing all CPUs.
2756 */
Steven Rostedt (VMware)100553e2017-09-01 12:18:28 -04002757 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
2758 goto free_ops;
2759
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002760 return 0;
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002761 }
Steven Rostedt3d083392008-05-12 21:20:42 +02002762
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002763 /*
2764 * If the ops uses a trampoline, then it needs to be
2765 * tested first on update.
2766 */
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002767 ops->flags |= FTRACE_OPS_FL_REMOVING;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002768 removed_ops = ops;
2769
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002770 /* The trampoline logic checks the old hashes */
2771 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2772 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2773
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002774 ftrace_run_update_code(command);
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002775
Steven Rostedt (Red Hat)84bde622014-09-12 14:21:13 -04002776 /*
2777 * If there's no more ops registered with ftrace, run a
2778 * sanity check to make sure all rec flags are cleared.
2779 */
2780 if (ftrace_ops_list == &ftrace_list_end) {
2781 struct ftrace_page *pg;
2782 struct dyn_ftrace *rec;
2783
2784 do_for_each_ftrace_rec(pg, rec) {
Alexei Starovoitov977c1f92016-11-07 15:14:20 -08002785 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
Steven Rostedt (Red Hat)84bde622014-09-12 14:21:13 -04002786 pr_warn(" %pS flags:%lx\n",
2787 (void *)rec->ip, rec->flags);
2788 } while_for_each_ftrace_rec();
2789 }
2790
Steven Rostedt (Red Hat)fef5aee2014-07-24 12:25:47 -04002791 ops->old_hash.filter_hash = NULL;
2792 ops->old_hash.notrace_hash = NULL;
2793
2794 removed_ops = NULL;
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04002795 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
Steven Rostedt (Red Hat)79922b82014-05-06 21:56:17 -04002796
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002797 /*
2798 * Dynamic ops may be freed; we must make sure that all
2799 * callers are done before leaving this function.
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05002800 * The same goes for freeing the per_cpu data of the per_cpu
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002801 * ops.
2802 *
2803 * Again, normal synchronize_sched() is not good enough.
2804 * We need to do a hard force of sched synchronization.
2805 * This is because we use preempt_disable() to do RCU, but
2806 * the function tracers can be called where RCU is not watching
2807 * (like before user_exit()). We cannot rely on the RCU
2808 * infrastructure to do the synchronization, thus we must do it
2809 * ourselves.
2810 */
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05002811 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002812 schedule_on_each_cpu(ftrace_sync);
2813
Steven Rostedt (VMware)100553e2017-09-01 12:18:28 -04002814 free_ops:
Steven Rostedt (Red Hat)12cce592014-07-03 15:48:16 -04002815 arch_ftrace_trampoline_free(ops);
2816
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05002817 if (ops->flags & FTRACE_OPS_FL_PER_CPU)
2818 per_cpu_ops_free(ops);
Steven Rostedt (Red Hat)a4c35ed22014-01-13 12:56:21 -05002819 }
2820
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05002821 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02002822}
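/*
 * Illustrative sketch (hypothetical names, guarded out): ftrace_startup()
 * and ftrace_shutdown() above are reached through the public register/
 * unregister API. A minimal caller looks roughly like this:
 */
#if 0
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called for every function this ops is allowed to trace */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

/* register_ftrace_function(&my_ops) ends in ftrace_startup(&my_ops, 0) */
/* unregister_ftrace_function(&my_ops) ends in ftrace_shutdown(&my_ops, 0) */
#endif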
2823
Ingo Molnare309b412008-05-12 21:20:51 +02002824static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002825{
Pratyush Anand1619dc32015-03-06 23:58:06 +05302826 int command;
2827
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002828 if (unlikely(ftrace_disabled))
2829 return;
2830
Steven Rostedtd61f82d2008-05-12 21:20:43 +02002831 /* Force update next time */
2832 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002833 /* ftrace_start_up is true if we want ftrace running */
Pratyush Anand1619dc32015-03-06 23:58:06 +05302834 if (ftrace_start_up) {
2835 command = FTRACE_UPDATE_CALLS;
2836 if (ftrace_graph_active)
2837 command |= FTRACE_START_FUNC_RET;
Steven Rostedt (Red Hat)524a3862015-03-06 19:55:13 -05002838 ftrace_startup_enable(command);
Pratyush Anand1619dc32015-03-06 23:58:06 +05302839 }
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002840}
2841
Ingo Molnare309b412008-05-12 21:20:51 +02002842static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002843{
Pratyush Anand1619dc32015-03-06 23:58:06 +05302844 int command;
2845
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002846 if (unlikely(ftrace_disabled))
2847 return;
2848
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05002849 /* ftrace_start_up is true if ftrace is running */
Pratyush Anand1619dc32015-03-06 23:58:06 +05302850 if (ftrace_start_up) {
2851 command = FTRACE_DISABLE_CALLS;
2852 if (ftrace_graph_active)
2853 command |= FTRACE_STOP_FUNC_RET;
2854 ftrace_run_update_code(command);
2855 }
Steven Rostedtb0fc4942008-05-12 21:20:43 +02002856}
2857
Steven Rostedt3d083392008-05-12 21:20:42 +02002858static cycle_t ftrace_update_time;
Steven Rostedt3d083392008-05-12 21:20:42 +02002859unsigned long ftrace_update_tot_cnt;
2860
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002861static inline int ops_traces_mod(struct ftrace_ops *ops)
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002862{
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002863 /*
2864 * An empty filter_hash means the module is traced by default.
2865 * But notrace hash requires a test of individual module functions.
2866 */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002867 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2868 ftrace_hash_empty(ops->func_hash->notrace_hash);
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002869}
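/*
 * For example (illustrative): with nothing in set_ftrace_filter both
 * hashes are empty and ops_traces_mod() returns true, so the functions
 * of a newly loaded module are traced. After something like
 *
 *	echo 'ext4_*' > set_ftrace_filter
 *
 * the filter hash is non-empty and a module's functions are only traced
 * if they match the hash (see ops_references_rec() below).
 */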
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002870
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002871/*
2872 * Check if the current ops references the record.
2873 *
2874 * If the ops traces all functions, then it was already accounted for.
2875 * If the ops does not trace the current record function, skip it.
2876 * If the ops ignores the function via notrace filter, skip it.
2877 */
2878static inline bool
2879ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2880{
2881 /* If ops isn't enabled, ignore it */
2882 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2883 return 0;
2884
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002885 /* If ops traces all then it includes this function */
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002886 if (ops_traces_mod(ops))
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002887 return 1;
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002888
2889 /* The function must be in the filter */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002890 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2891 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002892 return 0;
2893
2894 /* If in notrace hash, we ignore it too */
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04002895 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002896 return 0;
2897
2898 return 1;
2899}
2900
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002901static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
Steven Rostedt3d083392008-05-12 21:20:42 +02002902{
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002903 struct ftrace_page *pg;
Lai Jiangshane94142a2009-03-13 17:51:27 +08002904 struct dyn_ftrace *p;
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05302905 cycle_t start, stop;
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002906 unsigned long update_cnt = 0;
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002907 unsigned long rec_flags = 0;
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002908 int i;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002909
Ingo Molnar750ed1a2008-05-12 21:20:46 +02002910 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02002911
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002912 /*
2913 * When a module is loaded, this function is called to convert
2914 * the calls to mcount in its text to nops, and also to create
2915 * an entry in the ftrace data. Now, if ftrace is activated
2916 * after this call, but before the module sets its text to
2917 * read-only, the modification to enable ftrace can fail if
2918 * the read-only switch happens while ftrace is converting the calls.
2919 * To prevent this, the module's records are set as disabled
2920 * and will be enabled after the call to set the module's text
2921 * to read-only.
2922 */
2923 if (mod)
2924 rec_flags |= FTRACE_FL_DISABLED;
2925
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002926 for (pg = new_pgs; pg; pg = pg->next) {
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05302927
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002928 for (i = 0; i < pg->index; i++) {
Steven Rostedt (Red Hat)8c4f3c32013-07-30 00:04:32 -04002929
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002930 /* If something went wrong, bail without enabling anything */
2931 if (unlikely(ftrace_disabled))
2932 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +02002933
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002934 p = &pg->records[i];
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05002935 p->flags = rec_flags;
Abhishek Sagar0eb96702008-06-01 21:47:30 +05302936
Steven Rostedt85ae32a2011-12-16 16:30:31 -05002937 /*
2938 * Do the initial record conversion from mcount jump
2939 * to the NOP instructions.
2940 */
2941 if (!ftrace_code_disable(mod, p))
2942 break;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04002943
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002944 update_cnt++;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04002945 }
Steven Rostedt3d083392008-05-12 21:20:42 +02002946 }
2947
Ingo Molnar750ed1a2008-05-12 21:20:46 +02002948 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02002949 ftrace_update_time = stop - start;
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01002950 ftrace_update_tot_cnt += update_cnt;
Steven Rostedt3d083392008-05-12 21:20:42 +02002951
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02002952 return 0;
2953}
2954
Steven Rostedta7900872011-12-16 16:23:44 -05002955static int ftrace_allocate_records(struct ftrace_page *pg, int count)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002956{
Steven Rostedta7900872011-12-16 16:23:44 -05002957 int order;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002958 int cnt;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002959
Steven Rostedta7900872011-12-16 16:23:44 -05002960 if (WARN_ON(!count))
2961 return -EINVAL;
2962
2963 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002964
2965 /*
Steven Rostedta7900872011-12-16 16:23:44 -05002966 * We want to fill as much as possible. No more than a page
2967 * may be empty.
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002968 */
Steven Rostedta7900872011-12-16 16:23:44 -05002969 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2970 order--;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02002971
Steven Rostedta7900872011-12-16 16:23:44 -05002972 again:
2973 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2974
2975 if (!pg->records) {
2976 /* if we can't allocate this size, try something smaller */
2977 if (!order)
2978 return -ENOMEM;
2979 order >>= 1;
2980 goto again;
2981 }
2982
2983 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2984 pg->size = cnt;
2985
2986 if (cnt > count)
2987 cnt = count;
2988
2989 return cnt;
2990}
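/*
 * Worked example (assuming PAGE_SIZE = 4096 and ENTRY_SIZE = 16, so
 * ENTRIES_PER_PAGE = 256): for count = 700, DIV_ROUND_UP() gives 3
 * pages and get_count_order(3) = 2, i.e. 4 pages. Since
 * (4096 << 2) / 16 = 1024 >= 700 + 256, more than a page would sit
 * empty, so the loop drops order to 1. That block holds 512 records,
 * 512 is returned, and ftrace_allocate_pages() below loops to place
 * the remaining 188 records in a further allocation.
 */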
2991
2992static struct ftrace_page *
2993ftrace_allocate_pages(unsigned long num_to_init)
2994{
2995 struct ftrace_page *start_pg;
2996 struct ftrace_page *pg;
2997 int order;
2998 int cnt;
2999
3000 if (!num_to_init)
3001		return NULL;
3002
3003 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3004 if (!pg)
3005 return NULL;
3006
3007 /*
3008	 * Try to allocate as much as possible in one contiguous
3009	 * location that fills in all of the space. We want to
3010 * waste as little space as possible.
3011 */
3012 for (;;) {
3013 cnt = ftrace_allocate_records(pg, num_to_init);
3014 if (cnt < 0)
3015 goto free_pages;
3016
3017 num_to_init -= cnt;
3018 if (!num_to_init)
3019 break;
3020
3021 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3022 if (!pg->next)
3023 goto free_pages;
3024
3025 pg = pg->next;
3026 }
3027
3028 return start_pg;
3029
3030 free_pages:
Namhyung Kim1f61be002014-06-11 17:06:53 +09003031 pg = start_pg;
3032 while (pg) {
Steven Rostedta7900872011-12-16 16:23:44 -05003033 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3034 free_pages((unsigned long)pg->records, order);
3035 start_pg = pg->next;
3036 kfree(pg);
3037 pg = start_pg;
3038 }
3039 pr_info("ftrace: FAILED to allocate memory for functions\n");
3040 return NULL;
3041}
3042
Steven Rostedt5072c592008-05-12 21:20:43 +02003043#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3044
3045struct ftrace_iterator {
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003046 loff_t pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003047 loff_t func_pos;
3048 struct ftrace_page *pg;
3049 struct dyn_ftrace *func;
3050 struct ftrace_func_probe *probe;
3051 struct trace_parser parser;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003052 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003053 struct ftrace_ops *ops;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003054 int hidx;
3055 int idx;
3056 unsigned flags;
Steven Rostedt5072c592008-05-12 21:20:43 +02003057};
3058
Ingo Molnare309b412008-05-12 21:20:51 +02003059static void *
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003060t_hash_next(struct seq_file *m, loff_t *pos)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003061{
3062 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003063 struct hlist_node *hnd = NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003064 struct hlist_head *hhd;
3065
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003066 (*pos)++;
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003067 iter->pos = *pos;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003068
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003069 if (iter->probe)
3070 hnd = &iter->probe->node;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003071 retry:
3072 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
3073 return NULL;
3074
3075 hhd = &ftrace_func_hash[iter->hidx];
3076
3077 if (hlist_empty(hhd)) {
3078 iter->hidx++;
3079 hnd = NULL;
3080 goto retry;
3081 }
3082
3083 if (!hnd)
3084 hnd = hhd->first;
3085 else {
3086 hnd = hnd->next;
3087 if (!hnd) {
3088 iter->hidx++;
3089 goto retry;
3090 }
3091 }
3092
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003093 if (WARN_ON_ONCE(!hnd))
3094 return NULL;
3095
3096 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
3097
3098 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003099}
3100
3101static void *t_hash_start(struct seq_file *m, loff_t *pos)
3102{
3103 struct ftrace_iterator *iter = m->private;
3104 void *p = NULL;
Li Zefand82d6242009-06-24 09:54:54 +08003105 loff_t l;
3106
Steven Rostedt69a30832011-12-19 15:21:16 -05003107 if (!(iter->flags & FTRACE_ITER_DO_HASH))
3108 return NULL;
3109
Steven Rostedt2bccfff2010-09-09 08:43:22 -04003110 if (iter->func_pos > *pos)
3111 return NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003112
Li Zefand82d6242009-06-24 09:54:54 +08003113 iter->hidx = 0;
Steven Rostedt2bccfff2010-09-09 08:43:22 -04003114 for (l = 0; l <= (*pos - iter->func_pos); ) {
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003115 p = t_hash_next(m, &l);
Li Zefand82d6242009-06-24 09:54:54 +08003116 if (!p)
3117 break;
3118 }
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003119 if (!p)
3120 return NULL;
3121
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003122 /* Only set this if we have an item */
3123 iter->flags |= FTRACE_ITER_HASH;
3124
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003125 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003126}
3127
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003128static int
3129t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003130{
Steven Rostedtb6887d72009-02-17 12:32:04 -05003131 struct ftrace_func_probe *rec;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003132
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003133 rec = iter->probe;
3134 if (WARN_ON_ONCE(!rec))
3135 return -EIO;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003136
Steven Rostedt809dcf22009-02-16 23:06:01 -05003137 if (rec->ops->print)
3138 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3139
Steven Rostedtb375a112009-09-17 00:05:58 -04003140 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003141
3142 if (rec->data)
3143 seq_printf(m, ":%p", rec->data);
3144 seq_putc(m, '\n');
3145
3146 return 0;
3147}
3148
3149static void *
Steven Rostedt5072c592008-05-12 21:20:43 +02003150t_next(struct seq_file *m, void *v, loff_t *pos)
3151{
3152 struct ftrace_iterator *iter = m->private;
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003153 struct ftrace_ops *ops = iter->ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02003154 struct dyn_ftrace *rec = NULL;
3155
Steven Rostedt45a4a232011-04-21 23:16:46 -04003156 if (unlikely(ftrace_disabled))
3157 return NULL;
3158
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003159 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003160 return t_hash_next(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003161
Steven Rostedt5072c592008-05-12 21:20:43 +02003162 (*pos)++;
Jiri Olsa1106b692011-02-16 17:35:34 +01003163 iter->pos = iter->func_pos = *pos;
Steven Rostedt5072c592008-05-12 21:20:43 +02003164
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003165 if (iter->flags & FTRACE_ITER_PRINTALL)
Steven Rostedt57c072c2010-09-14 11:21:11 -04003166 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003167
Steven Rostedt5072c592008-05-12 21:20:43 +02003168 retry:
3169 if (iter->idx >= iter->pg->index) {
3170 if (iter->pg->next) {
3171 iter->pg = iter->pg->next;
3172 iter->idx = 0;
3173 goto retry;
3174 }
3175 } else {
3176 rec = &iter->pg->records[iter->idx++];
Steven Rostedt32082302011-12-16 14:42:37 -05003177 if (((iter->flags & FTRACE_ITER_FILTER) &&
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003178 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
Steven Rostedt0183fb12008-11-07 22:36:02 -05003179
Steven Rostedt41c52c02008-05-22 11:46:33 -04003180 ((iter->flags & FTRACE_ITER_NOTRACE) &&
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003181 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
Steven Rostedt647bcd02011-05-03 14:39:21 -04003182
3183 ((iter->flags & FTRACE_ITER_ENABLED) &&
Steven Rostedt (Red Hat)23ea9c42013-05-09 19:31:48 -04003184 !(rec->flags & FTRACE_FL_ENABLED))) {
Steven Rostedt647bcd02011-05-03 14:39:21 -04003185
Steven Rostedt5072c592008-05-12 21:20:43 +02003186 rec = NULL;
3187 goto retry;
3188 }
3189 }
3190
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003191 if (!rec)
Steven Rostedt57c072c2010-09-14 11:21:11 -04003192 return t_hash_start(m, pos);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003193
3194 iter->func = rec;
3195
3196 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003197}
3198
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003199static void reset_iter_read(struct ftrace_iterator *iter)
3200{
3201 iter->pos = 0;
3202 iter->func_pos = 0;
Dan Carpenter70f77b32012-06-09 19:10:27 +03003203 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
Steven Rostedt5072c592008-05-12 21:20:43 +02003204}
3205
3206static void *t_start(struct seq_file *m, loff_t *pos)
3207{
3208 struct ftrace_iterator *iter = m->private;
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003209 struct ftrace_ops *ops = iter->ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02003210 void *p = NULL;
Li Zefan694ce0a2009-06-24 09:54:19 +08003211 loff_t l;
Steven Rostedt5072c592008-05-12 21:20:43 +02003212
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003213 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003214
3215 if (unlikely(ftrace_disabled))
3216 return NULL;
3217
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003218 /*
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003219 * If an lseek was done, then reset and start from beginning.
3220 */
3221 if (*pos < iter->pos)
3222 reset_iter_read(iter);
3223
3224 /*
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003225 * For set_ftrace_filter reading, if we have the filter
3226	 * off, we can take a shortcut and just print out that all
3227 * functions are enabled.
3228 */
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003229 if ((iter->flags & FTRACE_ITER_FILTER &&
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003230 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003231 (iter->flags & FTRACE_ITER_NOTRACE &&
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003232 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003233 if (*pos > 0)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003234 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003235 iter->flags |= FTRACE_ITER_PRINTALL;
Chris Wrightdf091622010-09-09 16:34:59 -07003236 /* reset in case of seek/pread */
3237 iter->flags &= ~FTRACE_ITER_HASH;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003238 return iter;
3239 }
3240
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003241 if (iter->flags & FTRACE_ITER_HASH)
3242 return t_hash_start(m, pos);
3243
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003244 /*
3245 * Unfortunately, we need to restart at ftrace_pages_start
3246	 * every time we let go of ftrace_lock. This is because
3247 * those pointers can change without the lock.
3248 */
Li Zefan694ce0a2009-06-24 09:54:19 +08003249 iter->pg = ftrace_pages_start;
3250 iter->idx = 0;
3251 for (l = 0; l <= *pos; ) {
3252 p = t_next(m, p, &l);
3253 if (!p)
3254 break;
Liming Wang50cdaf02008-11-28 12:13:21 +08003255 }
walimis5821e1b2008-11-15 15:19:06 +08003256
Steven Rostedt69a30832011-12-19 15:21:16 -05003257 if (!p)
3258 return t_hash_start(m, pos);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003259
3260 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003261}
3262
3263static void t_stop(struct seq_file *m, void *p)
3264{
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003265 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003266}
3267
Steven Rostedt (Red Hat)15d5b022014-07-03 14:51:36 -04003268void * __weak
3269arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3270{
3271 return NULL;
3272}
3273
3274static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3275 struct dyn_ftrace *rec)
3276{
3277 void *ptr;
3278
3279 ptr = arch_ftrace_trampoline_func(ops, rec);
3280 if (ptr)
3281 seq_printf(m, " ->%pS", ptr);
3282}
3283
Steven Rostedt5072c592008-05-12 21:20:43 +02003284static int t_show(struct seq_file *m, void *v)
3285{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003286 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003287 struct dyn_ftrace *rec;
Steven Rostedt5072c592008-05-12 21:20:43 +02003288
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003289 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003290 return t_hash_show(m, iter);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05003291
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003292 if (iter->flags & FTRACE_ITER_PRINTALL) {
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003293 if (iter->flags & FTRACE_ITER_NOTRACE)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003294 seq_puts(m, "#### no functions disabled ####\n");
Namhyung Kim8c006cf2014-06-13 16:24:06 +09003295 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003296 seq_puts(m, "#### all functions enabled ####\n");
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05003297 return 0;
3298 }
3299
Steven Rostedt4aeb6962010-09-09 10:00:28 -04003300 rec = iter->func;
3301
Steven Rostedt5072c592008-05-12 21:20:43 +02003302 if (!rec)
3303 return 0;
3304
Steven Rostedt647bcd02011-05-03 14:39:21 -04003305 seq_printf(m, "%ps", (void *)rec->ip);
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003306 if (iter->flags & FTRACE_ITER_ENABLED) {
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003307 struct ftrace_ops *ops;
Steven Rostedt (Red Hat)15d5b022014-07-03 14:51:36 -04003308
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05003309 seq_printf(m, " (%ld)%s%s",
Steven Rostedt (Red Hat)0376bde2014-05-07 13:46:45 -04003310 ftrace_rec_count(rec),
Masami Hiramatsuf8b8be82014-11-21 05:25:16 -05003311 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3312 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003313 if (rec->flags & FTRACE_FL_TRAMP_EN) {
Steven Rostedt (Red Hat)5fecaa02014-07-24 16:00:31 -04003314 ops = ftrace_find_tramp_ops_any(rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05003315 if (ops) {
3316 do {
3317 seq_printf(m, "\ttramp: %pS (%pS)",
3318 (void *)ops->trampoline,
3319 (void *)ops->func);
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003320 add_trampoline_func(m, ops, rec);
Steven Rostedt (Red Hat)39daa7b2015-11-25 15:12:38 -05003321 ops = ftrace_find_tramp_ops_next(rec, ops);
3322 } while (ops);
3323 } else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003324 seq_puts(m, "\ttramp: ERROR!");
Steven Rostedt (Red Hat)030f4e12015-12-01 12:24:45 -05003325 } else {
3326 add_trampoline_func(m, NULL, rec);
Steven Rostedt (Red Hat)9674b2f2014-05-09 16:54:59 -04003327 }
3328 }
3329
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003330 seq_putc(m, '\n');
Steven Rostedt5072c592008-05-12 21:20:43 +02003331
3332 return 0;
3333}
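/*
 * Example (illustrative) of a line t_show() emits for enabled_functions
 * when a record has one registered ops that uses a trampoline:
 *
 *	schedule (1) R I	tramp: 0xffffffffa0001000 (my_func)
 *
 * The count and the "R" (regs) / "I" (ipmodify) markers come from
 * rec->flags; the tramp column only appears when FTRACE_FL_TRAMP_EN
 * is set.
 */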
3334
James Morris88e9d342009-09-22 16:43:43 -07003335static const struct seq_operations show_ftrace_seq_ops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003336 .start = t_start,
3337 .next = t_next,
3338 .stop = t_stop,
3339 .show = t_show,
3340};
3341
Ingo Molnare309b412008-05-12 21:20:51 +02003342static int
Steven Rostedt5072c592008-05-12 21:20:43 +02003343ftrace_avail_open(struct inode *inode, struct file *file)
3344{
3345 struct ftrace_iterator *iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02003346
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003347 if (unlikely(ftrace_disabled))
3348 return -ENODEV;
3349
Jiri Olsa50e18b92012-04-25 10:23:39 +02003350 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3351 if (iter) {
3352 iter->pg = ftrace_pages_start;
3353 iter->ops = &global_ops;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003354 }
Steven Rostedt5072c592008-05-12 21:20:43 +02003355
Jiri Olsa50e18b92012-04-25 10:23:39 +02003356 return iter ? 0 : -ENOMEM;
Steven Rostedt5072c592008-05-12 21:20:43 +02003357}
3358
Steven Rostedt647bcd02011-05-03 14:39:21 -04003359static int
3360ftrace_enabled_open(struct inode *inode, struct file *file)
3361{
3362 struct ftrace_iterator *iter;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003363
Jiri Olsa50e18b92012-04-25 10:23:39 +02003364 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3365 if (iter) {
3366 iter->pg = ftrace_pages_start;
3367 iter->flags = FTRACE_ITER_ENABLED;
3368 iter->ops = &global_ops;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003369 }
3370
Jiri Olsa50e18b92012-04-25 10:23:39 +02003371 return iter ? 0 : -ENOMEM;
Steven Rostedt647bcd02011-05-03 14:39:21 -04003372}
3373
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003374/**
3375 * ftrace_regex_open - initialize function tracer filter files
3376 * @ops: The ftrace_ops that hold the hash filters
3377 * @flag: The type of filter to process
3378 * @inode: The inode, usually passed in to your open routine
3379 * @file: The file, usually passed in to your open routine
3380 *
3381 * ftrace_regex_open() initializes the filter files for the
3382 * @ops. Depending on @flag it may process the filter hash or
3383 * the notrace hash of @ops. With this called from the open
3384 * routine, you can use ftrace_filter_write() for the write
3385 * routine if @flag has FTRACE_ITER_FILTER set, or
3386 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003387 * tracing_lseek() should be used as the lseek routine, and
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003388 * release must call ftrace_regex_release().
3389 */
3390int
Steven Rostedtf45948e2011-05-02 12:29:25 -04003391ftrace_regex_open(struct ftrace_ops *ops, int flag,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003392 struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02003393{
3394 struct ftrace_iterator *iter;
Steven Rostedtf45948e2011-05-02 12:29:25 -04003395 struct ftrace_hash *hash;
Steven Rostedt5072c592008-05-12 21:20:43 +02003396 int ret = 0;
3397
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09003398 ftrace_ops_init(ops);
3399
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003400 if (unlikely(ftrace_disabled))
3401 return -ENODEV;
3402
Steven Rostedt5072c592008-05-12 21:20:43 +02003403 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3404 if (!iter)
3405 return -ENOMEM;
3406
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003407 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3408 kfree(iter);
3409 return -ENOMEM;
3410 }
3411
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003412 iter->ops = ops;
3413 iter->flags = flag;
3414
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003415 mutex_lock(&ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003416
Steven Rostedtf45948e2011-05-02 12:29:25 -04003417 if (flag & FTRACE_ITER_NOTRACE)
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003418 hash = ops->func_hash->notrace_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04003419 else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003420 hash = ops->func_hash->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04003421
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003422 if (file->f_mode & FMODE_WRITE) {
Namhyung Kimef2fbe12014-06-11 17:06:54 +09003423 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3424
3425 if (file->f_flags & O_TRUNC)
3426 iter->hash = alloc_ftrace_hash(size_bits);
3427 else
3428 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3429
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003430 if (!iter->hash) {
3431 trace_parser_put(&iter->parser);
3432 kfree(iter);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003433 ret = -ENOMEM;
3434 goto out_unlock;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003435 }
3436 }
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003437
Steven Rostedt5072c592008-05-12 21:20:43 +02003438 if (file->f_mode & FMODE_READ) {
3439 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02003440
3441 ret = seq_open(file, &show_ftrace_seq_ops);
3442 if (!ret) {
3443 struct seq_file *m = file->private_data;
3444 m->private = iter;
Li Zefan79fe2492009-09-22 13:54:28 +08003445 } else {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003446 /* Failed */
3447 free_ftrace_hash(iter->hash);
Li Zefan79fe2492009-09-22 13:54:28 +08003448 trace_parser_put(&iter->parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02003449 kfree(iter);
Li Zefan79fe2492009-09-22 13:54:28 +08003450 }
Steven Rostedt5072c592008-05-12 21:20:43 +02003451 } else
3452 file->private_data = iter;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003453
3454 out_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003455 mutex_unlock(&ops->func_hash->regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003456
3457 return ret;
3458}
3459
Steven Rostedt41c52c02008-05-22 11:46:33 -04003460static int
3461ftrace_filter_open(struct inode *inode, struct file *file)
3462{
Steven Rostedt (Red Hat)e3b3e2e2013-11-11 23:07:14 -05003463 struct ftrace_ops *ops = inode->i_private;
3464
3465 return ftrace_regex_open(ops,
Steven Rostedt69a30832011-12-19 15:21:16 -05003466 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3467 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003468}
3469
3470static int
3471ftrace_notrace_open(struct inode *inode, struct file *file)
3472{
Steven Rostedt (Red Hat)e3b3e2e2013-11-11 23:07:14 -05003473 struct ftrace_ops *ops = inode->i_private;
3474
3475 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003476 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003477}
3478
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003479/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3480struct ftrace_glob {
3481 char *search;
3482 unsigned len;
3483 int type;
3484};
3485
Thiago Jung Bauermann7132e2d2016-04-25 18:56:14 -03003486/*
3487 * If symbols in an architecture don't correspond exactly to the user-visible
3488 * name of what they represent, it is possible to define this function to
3489 * perform the necessary adjustments.
3490 */
3491char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3492{
3493 return str;
3494}
3495
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003496static int ftrace_match(char *str, struct ftrace_glob *g)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003497{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003498 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08003499 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003500
Thiago Jung Bauermann7132e2d2016-04-25 18:56:14 -03003501 str = arch_ftrace_match_adjust(str, g->search);
3502
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003503 switch (g->type) {
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003504 case MATCH_FULL:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003505 if (strcmp(str, g->search) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003506 matched = 1;
3507 break;
3508 case MATCH_FRONT_ONLY:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003509 if (strncmp(str, g->search, g->len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003510 matched = 1;
3511 break;
3512 case MATCH_MIDDLE_ONLY:
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003513 if (strstr(str, g->search))
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003514 matched = 1;
3515 break;
3516 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08003517 slen = strlen(str);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003518 if (slen >= g->len &&
3519 memcmp(str + slen - g->len, g->search, g->len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003520 matched = 1;
3521 break;
3522 }
3523
3524 return matched;
3525}
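/*
 * Examples (illustrative): after filter_parse_regex() has stripped the
 * wildcards from the user's glob, ftrace_match() sees these types:
 *
 *	"schedule"	MATCH_FULL		exact compare
 *	"sched*"	MATCH_FRONT_ONLY	prefix compare
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring search
 *	"*sched"	MATCH_END_ONLY		suffix compare
 */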
3526
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003527static int
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003528enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
Steven Rostedt996e87b2011-04-26 16:11:03 -04003529{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003530 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003531 int ret = 0;
3532
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003533 entry = ftrace_lookup_ip(hash, rec->ip);
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003534 if (clear_filter) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003535 /* Do nothing if it doesn't exist */
3536 if (!entry)
3537 return 0;
3538
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003539 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003540 } else {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003541 /* Do nothing if it exists */
3542 if (entry)
3543 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003544
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003545 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003546 }
3547 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04003548}
3549
Steven Rostedt64e7c442009-02-13 17:08:48 -05003550static int
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003551ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3552 struct ftrace_glob *mod_g, int exclude_mod)
Steven Rostedt64e7c442009-02-13 17:08:48 -05003553{
3554 char str[KSYM_SYMBOL_LEN];
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003555 char *modname;
Steven Rostedt64e7c442009-02-13 17:08:48 -05003556
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003557 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3558
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003559 if (mod_g) {
3560 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3561
3562 /* blank module name to match all modules */
3563 if (!mod_g->len) {
3564 /* blank module globbing: modname xor exclude_mod */
3565 if ((!exclude_mod) != (!modname))
3566 goto func_match;
3567 return 0;
3568 }
3569
3570 /* not matching the module */
3571 if (!modname || !mod_matches) {
3572 if (exclude_mod)
3573 goto func_match;
3574 else
3575 return 0;
3576 }
3577
3578 if (mod_matches && exclude_mod)
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003579 return 0;
3580
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003581func_match:
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003582 /* blank search means to match all funcs in the mod */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003583 if (!func_g->len)
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003584 return 1;
3585 }
3586
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003587 return ftrace_match(str, func_g);
Steven Rostedt64e7c442009-02-13 17:08:48 -05003588}
3589
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003590static int
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003591match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003592{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003593 struct ftrace_page *pg;
3594 struct dyn_ftrace *rec;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003595 struct ftrace_glob func_g = { .type = MATCH_FULL };
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003596 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3597 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3598 int exclude_mod = 0;
Li Zefan311d16d2009-12-08 11:15:11 +08003599 int found = 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003600 int ret;
Dan Carpenter198bd492017-07-12 10:35:57 +03003601 int clear_filter = 0;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003602
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003603 if (func) {
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003604 func_g.type = filter_parse_regex(func, len, &func_g.search,
3605 &clear_filter);
3606 func_g.len = strlen(func_g.search);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003607 }
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003608
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003609 if (mod) {
3610 mod_g.type = filter_parse_regex(mod, strlen(mod),
3611 &mod_g.search, &exclude_mod);
3612 mod_g.len = strlen(mod_g.search);
Steven Rostedt9f4801e2009-02-13 15:56:43 -05003613 }
3614
Steven Rostedt52baf112009-02-14 01:15:39 -05003615 mutex_lock(&ftrace_lock);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003616
3617 if (unlikely(ftrace_disabled))
3618 goto out_unlock;
3619
Steven Rostedt265c8312009-02-13 12:43:56 -05003620 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05003621
3622 if (rec->flags & FTRACE_FL_DISABLED)
3623 continue;
3624
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003625 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003626 ret = enter_record(hash, rec, clear_filter);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003627 if (ret < 0) {
3628 found = ret;
3629 goto out_unlock;
3630 }
Li Zefan311d16d2009-12-08 11:15:11 +08003631 found = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05003632 }
3633 } while_for_each_ftrace_rec();
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003634 out_unlock:
Steven Rostedt52baf112009-02-14 01:15:39 -05003635 mutex_unlock(&ftrace_lock);
Li Zefan311d16d2009-12-08 11:15:11 +08003636
3637 return found;
Steven Rostedt5072c592008-05-12 21:20:43 +02003638}
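/*
 * Usage example (illustrative): the module glob handling above backs
 * filter strings such as
 *
 *	echo '*write*:mod:ext4' > set_ftrace_filter
 *	echo '*write*:mod:!ext4' > set_ftrace_filter
 *
 * where '!' on the module name sets exclude_mod and a blank function
 * glob matches every function in the matched modules.
 */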
3639
Steven Rostedt64e7c442009-02-13 17:08:48 -05003640static int
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003641ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
Steven Rostedt64e7c442009-02-13 17:08:48 -05003642{
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003643 return match_records(hash, buff, len, NULL);
Steven Rostedt64e7c442009-02-13 17:08:48 -05003644}
3645
Steven Rostedt64e7c442009-02-13 17:08:48 -05003646
Steven Rostedtf6180772009-02-14 00:40:25 -05003647/*
3648 * We register the module command as a template to show others how
3649 * to register the a command as well.
3650 * to register a command as well.
3651
3652static int
Steven Rostedt43dd61c2011-07-07 11:09:22 -04003653ftrace_mod_callback(struct ftrace_hash *hash,
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003654 char *func, char *cmd, char *module, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05003655{
Dmitry Safonov5e3949f2015-09-29 19:46:12 +03003656 int ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05003657
3658 /*
3659 * cmd == 'mod' because we only registered this func
3660 * for the 'mod' ftrace_func_command.
3661 * But if you register one func with multiple commands,
3662 * you can tell which command was used by the cmd
3663 * parameter.
3664 */
Dmitry Safonovf0a3b152015-09-29 19:46:13 +03003665 ret = match_records(hash, func, strlen(func), module);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003666 if (!ret)
Dmitry Safonov5e3949f2015-09-29 19:46:12 +03003667 return -EINVAL;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003668 if (ret < 0)
3669 return ret;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04003670 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05003671}
3672
3673static struct ftrace_func_command ftrace_mod_cmd = {
3674 .name = "mod",
3675 .func = ftrace_mod_callback,
3676};
3677
3678static int __init ftrace_mod_cmd_init(void)
3679{
3680 return register_ftrace_command(&ftrace_mod_cmd);
3681}
Steven Rostedt6f415672012-10-05 12:13:07 -04003682core_initcall(ftrace_mod_cmd_init);
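/*
 * Illustrative sketch (hypothetical names, guarded out): registering a
 * new command follows the same template as the "mod" command above.
 */
#if 0
static int
my_cmd_callback(struct ftrace_hash *hash, char *func, char *cmd,
		char *param, int enable)
{
	/* runs when "<func>:mycmd:<param>" is written to set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name = "mycmd",
	.func = my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
#endif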
Steven Rostedtf6180772009-02-14 00:40:25 -05003683
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04003684static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04003685 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003686{
Steven Rostedtb6887d72009-02-17 12:32:04 -05003687 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003688 struct hlist_head *hhd;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003689 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003690
3691 key = hash_long(ip, FTRACE_HASH_BITS);
3692
3693 hhd = &ftrace_func_hash[key];
3694
3695 if (hlist_empty(hhd))
3696 return;
3697
3698 /*
3699	 * Disable preemption for these calls to prevent an RCU grace
3700 * period. This syncs the hash iteration and freeing of items
3701 * on the hash. rcu_read_lock is too dangerous here.
3702 */
Steven Rostedt5168ae52010-06-03 09:36:50 -04003703 preempt_disable_notrace();
Steven Rostedt1bb539c2013-05-28 14:38:43 -04003704 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05003705 if (entry->ip == ip)
3706 entry->ops->func(ip, parent_ip, &entry->data);
3707 }
Steven Rostedt5168ae52010-06-03 09:36:50 -04003708 preempt_enable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05003709}
3710
Steven Rostedtb6887d72009-02-17 12:32:04 -05003711static struct ftrace_ops trace_probe_ops __read_mostly =
Steven Rostedt59df055f2009-02-14 15:29:06 -05003712{
Steven Rostedtfb9fb012009-03-25 13:26:41 -04003713 .func = function_trace_probe_call,
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09003714 .flags = FTRACE_OPS_FL_INITIALIZED,
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003715 INIT_OPS_HASH(trace_probe_ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003716};
3717
Steven Rostedtb6887d72009-02-17 12:32:04 -05003718static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003719
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05003720static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003721{
Steven Rostedtb8489142011-05-04 09:27:52 -04003722 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003723 int i;
3724
Steven Rostedt (Red Hat)19dd6032013-05-09 19:37:36 -04003725 if (ftrace_probe_registered) {
3726 /* still need to update the function call sites */
3727 if (ftrace_enabled)
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04003728 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3729 old_hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003730 return;
Steven Rostedt (Red Hat)19dd6032013-05-09 19:37:36 -04003731 }
Steven Rostedt59df055f2009-02-14 15:29:06 -05003732
3733 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3734 struct hlist_head *hhd = &ftrace_func_hash[i];
3735 if (hhd->first)
3736 break;
3737 }
3738 /* Nothing registered? */
3739 if (i == FTRACE_FUNC_HASHSIZE)
3740 return;
3741
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05003742 ret = ftrace_startup(&trace_probe_ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04003743
Steven Rostedtb6887d72009-02-17 12:32:04 -05003744 ftrace_probe_registered = 1;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003745}
3746
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003747static bool __disable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003748{
3749 int i;
3750
Steven Rostedtb6887d72009-02-17 12:32:04 -05003751 if (!ftrace_probe_registered)
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003752 return false;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003753
3754 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3755 struct hlist_head *hhd = &ftrace_func_hash[i];
3756 if (hhd->first)
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003757 return false;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003758 }
3759
3760 /* no more funcs left */
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05003761 ftrace_shutdown(&trace_probe_ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04003762
Steven Rostedtb6887d72009-02-17 12:32:04 -05003763 ftrace_probe_registered = 0;
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003764 return true;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003765}
3766
3767
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003768static void ftrace_free_entry(struct ftrace_func_probe *entry)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003769{
Steven Rostedt59df055f2009-02-14 15:29:06 -05003770 if (entry->ops->free)
Steven Rostedt (Red Hat)e67efb92013-03-12 15:07:59 -04003771 entry->ops->free(entry->ops, entry->ip, &entry->data);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003772 kfree(entry);
3773}
3774
Steven Rostedt59df055f2009-02-14 15:29:06 -05003775int
Steven Rostedtb6887d72009-02-17 12:32:04 -05003776register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05003777 void *data)
3778{
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05003779 struct ftrace_ops_hash old_hash_ops;
Steven Rostedtb6887d72009-02-17 12:32:04 -05003780 struct ftrace_func_probe *entry;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003781 struct ftrace_glob func_g;
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003782 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003783 struct ftrace_hash *old_hash = *orig_hash;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003784 struct ftrace_hash *hash;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003785 struct ftrace_page *pg;
3786 struct dyn_ftrace *rec;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003787 int not;
Steven Rostedt6a24a242009-02-17 11:20:26 -05003788 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003789 int count = 0;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003790 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003791
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003792 func_g.type = filter_parse_regex(glob, strlen(glob),
3793 &func_g.search, &not);
3794 func_g.len = strlen(func_g.search);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003795
Steven Rostedtb6887d72009-02-17 12:32:04 -05003796 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05003797 if (WARN_ON(not))
3798 return -EINVAL;
3799
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003800 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003801
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05003802 old_hash_ops.filter_hash = old_hash;
3803 /* Probes only have filters */
3804 old_hash_ops.notrace_hash = NULL;
3805
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003806 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003807 if (!hash) {
3808 count = -ENOMEM;
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04003809 goto out;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003810 }
3811
3812 if (unlikely(ftrace_disabled)) {
3813 count = -ENODEV;
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04003814 goto out;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003815 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04003816
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04003817 mutex_lock(&ftrace_lock);
3818
Steven Rostedt59df055f2009-02-14 15:29:06 -05003819 do_for_each_ftrace_rec(pg, rec) {
3820
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05003821 if (rec->flags & FTRACE_FL_DISABLED)
3822 continue;
3823
Dmitry Safonov0b507e12015-09-29 19:46:15 +03003824 if (!ftrace_match_record(rec, &func_g, NULL, 0))
Steven Rostedt59df055f2009-02-14 15:29:06 -05003825 continue;
3826
3827 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3828 if (!entry) {
Steven Rostedtb6887d72009-02-17 12:32:04 -05003829 /* If we did not process any, then return error */
Steven Rostedt59df055f2009-02-14 15:29:06 -05003830 if (!count)
3831 count = -ENOMEM;
3832 goto out_unlock;
3833 }
3834
3835 count++;
3836
3837 entry->data = data;
3838
3839 /*
3840 * The caller might want to do something special
3841 * for each function we find. We call the callback
3842 * to give the caller an opportunity to do so.
3843 */
Steven Rostedt (Red Hat)e67efb92013-03-12 15:07:59 -04003844 if (ops->init) {
3845 if (ops->init(ops, rec->ip, &entry->data) < 0) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05003846 /* caller does not like this func */
3847 kfree(entry);
3848 continue;
3849 }
3850 }
3851
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003852 ret = enter_record(hash, rec, 0);
3853 if (ret < 0) {
3854 kfree(entry);
3855 count = ret;
3856 goto out_unlock;
3857 }
3858
Steven Rostedt59df055f2009-02-14 15:29:06 -05003859 entry->ops = ops;
3860 entry->ip = rec->ip;
3861
3862 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3863 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3864
3865 } while_for_each_ftrace_rec();
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003866
3867 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04003868
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05003869 __enable_ftrace_function_probe(&old_hash_ops);
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04003870
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003871 if (!ret)
3872 free_ftrace_hash_rcu(old_hash);
3873 else
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003874 count = ret;
3875
Steven Rostedt59df055f2009-02-14 15:29:06 -05003876 out_unlock:
Steven Rostedt (Red Hat)5ae0bf52013-05-09 18:20:37 -04003877 mutex_unlock(&ftrace_lock);
3878 out:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003879 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003880 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003881
3882 return count;
3883}
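/*
 * Illustrative sketch (hypothetical names, guarded out): a probe that
 * counts hits on the matched functions. The callback signature matches
 * the call made from function_trace_probe_call() above.
 */
#if 0
static void my_counter_probe(unsigned long ip, unsigned long parent_ip,
			     void **data)
{
	/* *data is the per-probe slot set up at registration time */
	(*(unsigned long *)data)++;
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_counter_probe,
};

/* register_ftrace_function_probe("sched*", &my_probe_ops, NULL); */
#endif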
3884
3885enum {
Steven Rostedtb6887d72009-02-17 12:32:04 -05003886 PROBE_TEST_FUNC = 1,
3887 PROBE_TEST_DATA = 2
Steven Rostedt59df055f2009-02-14 15:29:06 -05003888};
3889
3890static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05003891__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05003892 void *data, int flags)
3893{
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003894 struct ftrace_ops_hash old_hash_ops;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003895 struct ftrace_func_entry *rec_entry;
Steven Rostedtb6887d72009-02-17 12:32:04 -05003896 struct ftrace_func_probe *entry;
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003897 struct ftrace_func_probe *p;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003898 struct ftrace_glob func_g;
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003899 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003900 struct ftrace_hash *old_hash = *orig_hash;
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003901 struct list_head free_list;
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003902 struct ftrace_hash *hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08003903 struct hlist_node *tmp;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003904 char str[KSYM_SYMBOL_LEN];
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003905 int i, ret;
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003906 bool disabled;
Steven Rostedt59df055f2009-02-14 15:29:06 -05003907
Atsushi Tsujib36461d2009-09-15 19:06:30 +09003908 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003909 func_g.search = NULL;
Atsushi Tsujib36461d2009-09-15 19:06:30 +09003910 else if (glob) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05003911 int not;
3912
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003913 func_g.type = filter_parse_regex(glob, strlen(glob),
3914 &func_g.search, &not);
3915 func_g.len = strlen(func_g.search);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003916
Steven Rostedtb6887d72009-02-17 12:32:04 -05003917 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05003918 if (WARN_ON(not))
3919 return;
3920 }
3921
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003922 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003923
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003924 old_hash_ops.filter_hash = old_hash;
3925 /* Probes only have filters */
3926 old_hash_ops.notrace_hash = NULL;
3927
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003928 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3929 if (!hash)
3930 /* Hmm, should report this somehow */
3931 goto out_unlock;
3932
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003933 INIT_LIST_HEAD(&free_list);
3934
Steven Rostedt59df055f2009-02-14 15:29:06 -05003935 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3936 struct hlist_head *hhd = &ftrace_func_hash[i];
3937
Sasha Levinb67bfe02013-02-27 17:06:00 -08003938 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05003939
3940 /* break up if statements for readability */
Steven Rostedtb6887d72009-02-17 12:32:04 -05003941 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003942 continue;
3943
Steven Rostedtb6887d72009-02-17 12:32:04 -05003944 if ((flags & PROBE_TEST_DATA) && entry->data != data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05003945 continue;
3946
3947 /* do this last, since it is the most expensive */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003948 if (func_g.search) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05003949 kallsyms_lookup(entry->ip, NULL, NULL,
3950 NULL, str);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003951 if (!ftrace_match(str, &func_g))
Steven Rostedt59df055f2009-02-14 15:29:06 -05003952 continue;
3953 }
3954
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003955 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3956 /* It is possible more than one entry had this ip */
3957 if (rec_entry)
3958 free_hash_entry(hash, rec_entry);
3959
Steven Rostedt (Red Hat)740466b2013-03-13 11:15:19 -04003960 hlist_del_rcu(&entry->node);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003961 list_add(&entry->free_list, &free_list);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003962 }
3963 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003964 mutex_lock(&ftrace_lock);
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003965 disabled = __disable_ftrace_function_probe();
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003966 /*
3967 * Remove after the disable is called. Otherwise, if the last
3968 * probe is removed, a null hash means *all enabled*.
3969 */
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003970 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
Steven Rostedt (VMware)666452f2017-04-14 17:45:45 -04003971
3972 /* still need to update the function call sites */
3973 if (ftrace_enabled && !disabled)
3974 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3975 &old_hash_ops);
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003976 synchronize_sched();
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04003977 if (!ret)
3978 free_ftrace_hash_rcu(old_hash);
3979
Steven Rostedt (Red Hat)7818b382013-03-13 12:42:58 -04003980 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3981 list_del(&entry->free_list);
3982 ftrace_free_entry(entry);
3983 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09003984 mutex_unlock(&ftrace_lock);
Dmitry Safonov3ba00922015-09-29 19:46:14 +03003985
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003986 out_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04003987 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
Steven Rostedt (Red Hat)e1df4cb2013-03-12 10:09:42 -04003988 free_ftrace_hash(hash);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003989}
3990
3991void
Steven Rostedtb6887d72009-02-17 12:32:04 -05003992unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05003993 void *data)
3994{
Steven Rostedtb6887d72009-02-17 12:32:04 -05003995 __unregister_ftrace_function_probe(glob, ops, data,
3996 PROBE_TEST_FUNC | PROBE_TEST_DATA);
Steven Rostedt59df055f2009-02-14 15:29:06 -05003997}
3998
3999void
Steven Rostedtb6887d72009-02-17 12:32:04 -05004000unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004001{
Steven Rostedtb6887d72009-02-17 12:32:04 -05004002 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004003}
4004
Steven Rostedtb6887d72009-02-17 12:32:04 -05004005void unregister_ftrace_function_probe_all(char *glob)
Steven Rostedt59df055f2009-02-14 15:29:06 -05004006{
Steven Rostedtb6887d72009-02-17 12:32:04 -05004007 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
Steven Rostedt59df055f2009-02-14 15:29:06 -05004008}
4009
Steven Rostedtf6180772009-02-14 00:40:25 -05004010static LIST_HEAD(ftrace_commands);
4011static DEFINE_MUTEX(ftrace_cmd_mutex);
4012
Tom Zanussi38de93a2013-10-24 08:34:18 -05004013/*
4014 * Currently we only register ftrace commands from __init, so mark this
4015 * __init too.
4016 */
4017__init int register_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004018{
4019 struct ftrace_func_command *p;
4020 int ret = 0;
4021
4022 mutex_lock(&ftrace_cmd_mutex);
4023 list_for_each_entry(p, &ftrace_commands, list) {
4024 if (strcmp(cmd->name, p->name) == 0) {
4025 ret = -EBUSY;
4026 goto out_unlock;
4027 }
4028 }
4029 list_add(&cmd->list, &ftrace_commands);
4030 out_unlock:
4031 mutex_unlock(&ftrace_cmd_mutex);
4032
4033 return ret;
4034}
4035
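/*
 * Illustrative sketch (all names hypothetical): a command that could be
 * registered with register_ftrace_command() from an __init path.  The
 * callback signature matches the p->func() invocation in
 * ftrace_process_regex() below; @param may be NULL if the user gave no
 * third field.
 *
 *	static int mycmd_func(struct ftrace_hash *hash, char *func,
 *			      char *cmd, char *param, int enable)
 *	{
 *		pr_info("mycmd: %s:%s:%s\n", func, cmd, param);
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command mycmd = {
 *		.name	= "mycmd",
 *		.func	= mycmd_func,
 *	};
 *
 *	static int __init mycmd_init(void)
 *	{
 *		return register_ftrace_command(&mycmd);
 *	}
 *
 * Writing "some_func:mycmd:arg" to set_ftrace_filter would then reach
 * mycmd_func() with func="some_func", cmd="mycmd", param="arg".
 */
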
Tom Zanussi38de93a2013-10-24 08:34:18 -05004036/*
4037 * Currently we only unregister ftrace commands from __init, so mark
4038 * this __init too.
4039 */
4040__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
Steven Rostedtf6180772009-02-14 00:40:25 -05004041{
4042 struct ftrace_func_command *p, *n;
4043 int ret = -ENODEV;
4044
4045 mutex_lock(&ftrace_cmd_mutex);
4046 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4047 if (strcmp(cmd->name, p->name) == 0) {
4048 ret = 0;
4049 list_del_init(&p->list);
4050 goto out_unlock;
4051 }
4052 }
4053 out_unlock:
4054 mutex_unlock(&ftrace_cmd_mutex);
4055
4056 return ret;
4057}
4058
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004059static int ftrace_process_regex(struct ftrace_hash *hash,
4060 char *buff, int len, int enable)
Steven Rostedt64e7c442009-02-13 17:08:48 -05004061{
Steven Rostedtf6180772009-02-14 00:40:25 -05004062 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05004063 struct ftrace_func_command *p;
GuoWen Li0aff1c02011-06-01 19:18:47 +08004064 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004065
4066 func = strsep(&next, ":");
4067
4068 if (!next) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004069 ret = ftrace_match_records(hash, func, len);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04004070 if (!ret)
4071 ret = -EINVAL;
4072 if (ret < 0)
4073 return ret;
4074 return 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004075 }
4076
Steven Rostedtf6180772009-02-14 00:40:25 -05004077 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05004078
4079 command = strsep(&next, ":");
4080
Steven Rostedtf6180772009-02-14 00:40:25 -05004081 mutex_lock(&ftrace_cmd_mutex);
4082 list_for_each_entry(p, &ftrace_commands, list) {
4083 if (strcmp(p->name, command) == 0) {
Steven Rostedt43dd61c2011-07-07 11:09:22 -04004084 ret = p->func(hash, func, command, next, enable);
Steven Rostedtf6180772009-02-14 00:40:25 -05004085 goto out_unlock;
4086 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05004087 }
Steven Rostedtf6180772009-02-14 00:40:25 -05004088 out_unlock:
4089 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05004090
Steven Rostedtf6180772009-02-14 00:40:25 -05004091 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05004092}
4093
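/*
 * Hedged usage sketch of the syntax parsed above (the function names
 * are placeholders, and the available commands depend on what was
 * registered via register_ftrace_command()): with no ':' the buffer is
 * a plain filter glob; otherwise it is "<glob>:<command>[:<param>]".
 *
 *	echo 'schedule' > set_ftrace_filter
 *	echo 'vfs_read:traceoff' > set_ftrace_filter
 *	echo 'vfs_read:traceoff:5' > set_ftrace_filter
 */
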
Ingo Molnare309b412008-05-12 21:20:51 +02004094static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004095ftrace_regex_write(struct file *file, const char __user *ubuf,
4096 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02004097{
4098 struct ftrace_iterator *iter;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004099 struct trace_parser *parser;
4100 ssize_t ret, read;
Steven Rostedt5072c592008-05-12 21:20:43 +02004101
Li Zefan4ba79782009-09-22 13:52:20 +08004102 if (!cnt)
Steven Rostedt5072c592008-05-12 21:20:43 +02004103 return 0;
4104
Steven Rostedt5072c592008-05-12 21:20:43 +02004105 if (file->f_mode & FMODE_READ) {
4106 struct seq_file *m = file->private_data;
4107 iter = m->private;
4108 } else
4109 iter = file->private_data;
4110
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004111 if (unlikely(ftrace_disabled))
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004112 return -ENODEV;
4113
4114 /* iter->hash is a local copy, so we don't need regex_lock */
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004115
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004116 parser = &iter->parser;
4117 read = trace_get_user(parser, ubuf, cnt, ppos);
Steven Rostedt5072c592008-05-12 21:20:43 +02004118
Li Zefan4ba79782009-09-22 13:52:20 +08004119 if (read >= 0 && trace_parser_loaded(parser) &&
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004120 !trace_parser_cont(parser)) {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004121 ret = ftrace_process_regex(iter->hash, parser->buffer,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004122 parser->idx, enable);
Li Zefan313254a2009-12-08 11:15:30 +08004123 trace_parser_clear(parser);
Steven Rostedt (Red Hat)7c088b52013-05-09 11:35:12 -04004124 if (ret < 0)
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004125 goto out;
Steven Rostedt5072c592008-05-12 21:20:43 +02004126 }
4127
Steven Rostedt5072c592008-05-12 21:20:43 +02004128 ret = read;
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004129 out:
Steven Rostedt5072c592008-05-12 21:20:43 +02004130 return ret;
4131}
4132
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004133ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004134ftrace_filter_write(struct file *file, const char __user *ubuf,
4135 size_t cnt, loff_t *ppos)
4136{
4137 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4138}
4139
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004140ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04004141ftrace_notrace_write(struct file *file, const char __user *ubuf,
4142 size_t cnt, loff_t *ppos)
4143{
4144 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4145}
4146
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004147static int
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004148ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4149{
4150 struct ftrace_func_entry *entry;
4151
4152 if (!ftrace_location(ip))
4153 return -EINVAL;
4154
4155 if (remove) {
4156 entry = ftrace_lookup_ip(hash, ip);
4157 if (!entry)
4158 return -ENOENT;
4159 free_hash_entry(hash, entry);
4160 return 0;
4161 }
4162
4163 return add_hash_entry(hash, ip);
4164}
4165
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04004166static void ftrace_ops_update_code(struct ftrace_ops *ops,
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004167 struct ftrace_ops_hash *old_hash)
Steven Rostedt (Red Hat)1c80c432013-07-25 20:22:00 -04004168{
Steven Rostedt (Red Hat)8f86f832015-01-13 11:20:43 -05004169 struct ftrace_ops *op;
4170
4171 if (!ftrace_enabled)
4172 return;
4173
4174 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
Steven Rostedt (Red Hat)8252ecf2014-10-24 14:56:01 -04004175 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
Steven Rostedt (Red Hat)8f86f832015-01-13 11:20:43 -05004176 return;
4177 }
4178
4179 /*
4180 * If this is the shared global_ops filter, then we need to
4181 * check if there is another ops that shares it and is enabled.
4182 * If so, we still need to run the modify code.
4183 */
4184 if (ops->func_hash != &global_ops.local_hash)
4185 return;
4186
4187 do_for_each_ftrace_op(op, ftrace_ops_list) {
4188 if (op->func_hash == &global_ops.local_hash &&
4189 op->flags & FTRACE_OPS_FL_ENABLED) {
4190 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4191 /* Only need to do this once */
4192 return;
4193 }
4194 } while_for_each_ftrace_op(op);
Steven Rostedt (Red Hat)1c80c432013-07-25 20:22:00 -04004195}
4196
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004197static int
4198ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4199 unsigned long ip, int remove, int reset, int enable)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004200{
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004201 struct ftrace_hash **orig_hash;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004202 struct ftrace_ops_hash old_hash_ops;
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004203 struct ftrace_hash *old_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004204 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004205 int ret;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004206
Steven Rostedt41c52c02008-05-22 11:46:33 -04004207 if (unlikely(ftrace_disabled))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004208 return -ENODEV;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004209
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004210 mutex_lock(&ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004211
Steven Rostedtf45948e2011-05-02 12:29:25 -04004212 if (enable)
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004213 orig_hash = &ops->func_hash->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04004214 else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004215 orig_hash = &ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004216
Wang Nanb972cc52014-07-15 08:40:20 +08004217 if (reset)
4218 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4219 else
4220 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4221
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004222 if (!hash) {
4223 ret = -ENOMEM;
4224 goto out_regex_unlock;
4225 }
Steven Rostedtf45948e2011-05-02 12:29:25 -04004226
Jiri Olsaac483c42012-01-02 10:04:14 +01004227 if (buf && !ftrace_match_records(hash, buf, len)) {
4228 ret = -EINVAL;
4229 goto out_regex_unlock;
4230 }
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004231 if (ip) {
4232 ret = ftrace_match_addr(hash, ip, remove);
4233 if (ret < 0)
4234 goto out_regex_unlock;
4235 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004236
4237 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004238 old_hash = *orig_hash;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004239 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4240 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
Steven Rostedt41fb61c2011-07-13 15:03:44 -04004241 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004242 if (!ret) {
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004243 ftrace_ops_update_code(ops, &old_hash_ops);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004244 free_ftrace_hash_rcu(old_hash);
4245 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004246 mutex_unlock(&ftrace_lock);
4247
Jiri Olsaac483c42012-01-02 10:04:14 +01004248 out_regex_unlock:
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004249 mutex_unlock(&ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004250
4251 free_ftrace_hash(hash);
4252 return ret;
Steven Rostedt41c52c02008-05-22 11:46:33 -04004253}
4254
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004255static int
4256ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4257 int reset, int enable)
4258{
4259 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4260}
4261
4262/**
4263 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4264 * @ops - the ops to set the filter with
4265 * @ip - the address to add to or remove from the filter.
4266 * @remove - non zero to remove the ip from the filter
4267 * @reset - non zero to reset all filters before applying this filter.
4268 *
4269 * Filters denote which functions should be enabled when tracing is enabled.
4270 * If @ip is NULL, it fails to update the filter.
4271 */
4272int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4273 int remove, int reset)
4274{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004275 ftrace_ops_init(ops);
Masami Hiramatsu647664e2012-06-05 19:28:08 +09004276 return ftrace_set_addr(ops, ip, remove, reset, 1);
4277}
4278EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
4279
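/*
 * Caller-side sketch, under stated assumptions: "my_callback" and
 * "my_ops" are hypothetical, and kallsyms_lookup_name() is assumed
 * available to resolve the address of the function to trace.
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	unsigned long ip = kallsyms_lookup_name("do_fork");
 *	int ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 *
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 *
 * This is the sort of pattern used by callers that must trace one exact
 * call site rather than a glob of names.
 */
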
4280static int
4281ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4282 int reset, int enable)
4283{
4284 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4285}
4286
Steven Rostedt77a2b372008-05-12 21:20:45 +02004287/**
4288 * ftrace_set_filter - set a function to filter on in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004289 * @ops - the ops to set the filter with
Steven Rostedt77a2b372008-05-12 21:20:45 +02004290 * @buf - the string that holds the function filter text.
4291 * @len - the length of the string.
4292 * @reset - non zero to reset all filters before applying this filter.
4293 *
4294 * Filters denote which functions should be enabled when tracing is enabled.
4295 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4296 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004297int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004298 int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02004299{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004300 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004301 return ftrace_set_regex(ops, buf, len, reset, 1);
Steven Rostedt41c52c02008-05-22 11:46:33 -04004302}
Steven Rostedt936e0742011-05-05 22:54:01 -04004303EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02004304
Steven Rostedt41c52c02008-05-22 11:46:33 -04004305/**
4306 * ftrace_set_notrace - set a function to not trace in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04004307 * @ops - the ops to set the notrace filter with
Steven Rostedt41c52c02008-05-22 11:46:33 -04004308 * @buf - the string that holds the function notrace text.
4309 * @len - the length of the string.
4310 * @reset - non zero to reset all filters before applying this filter.
4311 *
4312 * Notrace Filters denote which functions should not be enabled when tracing
4313 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4314 * for tracing.
4315 */
Jiri Olsaac483c42012-01-02 10:04:14 +01004316int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
Steven Rostedt936e0742011-05-05 22:54:01 -04004317 int len, int reset)
4318{
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004319 ftrace_ops_init(ops);
Jiri Olsaac483c42012-01-02 10:04:14 +01004320 return ftrace_set_regex(ops, buf, len, reset, 0);
Steven Rostedt936e0742011-05-05 22:54:01 -04004321}
4322EXPORT_SYMBOL_GPL(ftrace_set_notrace);
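
/*
 * Combined sketch for the two setters above ("my_ops" is a placeholder;
 * casts and error handling elided):
 *
 *	ftrace_set_filter(&my_ops, "mutex_*", strlen("mutex_*"), 1);
 *	ftrace_set_notrace(&my_ops, "mutex_unlock", strlen("mutex_unlock"), 0);
 *
 * The first call resets my_ops' filter and traces everything matching
 * "mutex_*"; the second adds a notrace entry without a reset, excluding
 * mutex_unlock() from that set once my_ops is registered.
 */
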
4323/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004324 * ftrace_set_global_filter - set a function to filter on with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004325 * @buf - the string that holds the function filter text.
4326 * @len - the length of the string.
4327 * @reset - non zero to reset all filters before applying this filter.
4328 *
4329 * Filters denote which functions should be enabled when tracing is enabled.
4330 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4331 */
4332void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4333{
4334 ftrace_set_regex(&global_ops, buf, len, reset, 1);
4335}
4336EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4337
4338/**
Jiaxing Wang8d1b0652014-04-20 23:10:44 +08004339 * ftrace_set_global_notrace - set a function to not trace with global tracers
Steven Rostedt936e0742011-05-05 22:54:01 -04004340 * @buf - the string that holds the function notrace text.
4341 * @len - the length of the string.
4342 * @reset - non zero to reset all filters before applying this filter.
4343 *
4344 * Notrace Filters denote which functions should not be enabled when tracing
4345 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4346 * for tracing.
4347 */
4348void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
Steven Rostedt41c52c02008-05-22 11:46:33 -04004349{
Steven Rostedtf45948e2011-05-02 12:29:25 -04004350 ftrace_set_regex(&global_ops, buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004351}
Steven Rostedt936e0742011-05-05 22:54:01 -04004352EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02004353
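/*
 * Minimal sketch for the global variants above (the symbol is a
 * placeholder; casts elided):
 *
 *	ftrace_set_global_filter("vfs_read", strlen("vfs_read"), 1);
 *
 * Since these operate on the shared global_ops, this is roughly what
 * writing "vfs_read" into set_ftrace_filter does from user space.
 */
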
Steven Rostedt2af15d62009-05-28 13:37:24 -04004354/*
4355 * command line interface to allow users to set filters on boot up.
4356 */
4357#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4358static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4359static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4360
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004361/* Used by function selftest to not test if filter is set */
4362bool ftrace_filter_param __initdata;
4363
Steven Rostedt2af15d62009-05-28 13:37:24 -04004364static int __init set_ftrace_notrace(char *str)
4365{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004366 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004367 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004368 return 1;
4369}
4370__setup("ftrace_notrace=", set_ftrace_notrace);
4371
4372static int __init set_ftrace_filter(char *str)
4373{
Steven Rostedt (Red Hat)f1ed7c72013-06-27 22:18:06 -04004374 ftrace_filter_param = true;
Chen Gang75761cc2013-04-08 12:12:39 +08004375 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004376 return 1;
4377}
4378__setup("ftrace_filter=", set_ftrace_filter);
4379
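/*
 * Boot-time sketch (kernel command line; the function names are
 * placeholders).  Entries are comma separated and applied by
 * set_ftrace_early_filters() during ftrace_init():
 *
 *	ftrace_filter=kmem_cache_alloc,kmem_cache_free
 *	ftrace_notrace=rcu_read_lock,rcu_read_unlock
 */
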
Stefan Assmann369bc182009-10-12 22:17:21 +02004380#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08004381static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004382static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004383static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
Steven Rostedt801c29f2010-03-05 20:02:19 -05004384
Stefan Assmann369bc182009-10-12 22:17:21 +02004385static int __init set_graph_function(char *str)
4386{
Frederic Weisbecker06f43d62009-10-14 20:43:39 +02004387 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
Stefan Assmann369bc182009-10-12 22:17:21 +02004388 return 1;
4389}
4390__setup("ftrace_graph_filter=", set_graph_function);
4391
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004392static int __init set_graph_notrace_function(char *str)
4393{
4394 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4395 return 1;
4396}
4397__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4398
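/*
 * Similar sketch for the two graph parameters above;
 * set_ftrace_early_graph() below hands each comma-separated entry to
 * ftrace_set_func() one expression at a time:
 *
 *	ftrace_graph_filter=do_sys_open,schedule
 *	ftrace_graph_notrace=schedule
 */
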
4399static void __init set_ftrace_early_graph(char *buf, int enable)
Stefan Assmann369bc182009-10-12 22:17:21 +02004400{
4401 int ret;
4402 char *func;
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004403 unsigned long *table = ftrace_graph_funcs;
4404 int *count = &ftrace_graph_count;
4405
4406 if (!enable) {
4407 table = ftrace_graph_notrace_funcs;
4408 count = &ftrace_graph_notrace_count;
4409 }
Stefan Assmann369bc182009-10-12 22:17:21 +02004410
4411 while (buf) {
4412 func = strsep(&buf, ",");
4413 /* we allow only one expression at a time */
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004414 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
Stefan Assmann369bc182009-10-12 22:17:21 +02004415 if (ret)
4416 printk(KERN_DEBUG "ftrace: function %s not traceable\n",
4417 func);
4418 }
4419}
4420#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4421
Steven Rostedt2a85a372011-12-19 21:57:44 -05004422void __init
4423ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04004424{
4425 char *func;
4426
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09004427 ftrace_ops_init(ops);
4428
Steven Rostedt2af15d62009-05-28 13:37:24 -04004429 while (buf) {
4430 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04004431 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004432 }
4433}
4434
4435static void __init set_ftrace_early_filters(void)
4436{
4437 if (ftrace_filter_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004438 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
Steven Rostedt2af15d62009-05-28 13:37:24 -04004439 if (ftrace_notrace_buf[0])
Steven Rostedt2a85a372011-12-19 21:57:44 -05004440 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004441#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4442 if (ftrace_graph_buf[0])
Namhyung Kim0d7d9a12014-06-13 01:23:50 +09004443 set_ftrace_early_graph(ftrace_graph_buf, 1);
4444 if (ftrace_graph_notrace_buf[0])
4445 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02004446#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
Steven Rostedt2af15d62009-05-28 13:37:24 -04004447}
4448
Steven Rostedtfc13cb02011-12-19 14:41:25 -05004449int ftrace_regex_release(struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02004450{
4451 struct seq_file *m = (struct seq_file *)file->private_data;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004452 struct ftrace_ops_hash old_hash_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02004453 struct ftrace_iterator *iter;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004454 struct ftrace_hash **orig_hash;
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004455 struct ftrace_hash *old_hash;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004456 struct trace_parser *parser;
Steven Rostedted926f92011-05-03 13:25:24 -04004457 int filter_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004458 int ret;
Steven Rostedt5072c592008-05-12 21:20:43 +02004459
Steven Rostedt5072c592008-05-12 21:20:43 +02004460 if (file->f_mode & FMODE_READ) {
4461 iter = m->private;
Steven Rostedt5072c592008-05-12 21:20:43 +02004462 seq_release(inode, file);
4463 } else
4464 iter = file->private_data;
4465
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004466 parser = &iter->parser;
4467 if (trace_parser_loaded(parser)) {
4468 parser->buffer[parser->idx] = 0;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004469 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
Steven Rostedt5072c592008-05-12 21:20:43 +02004470 }
4471
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004472 trace_parser_put(parser);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004473
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004474 mutex_lock(&iter->ops->func_hash->regex_lock);
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004475
Steven Rostedt058e2972011-04-29 22:35:33 -04004476 if (file->f_mode & FMODE_WRITE) {
Steven Rostedted926f92011-05-03 13:25:24 -04004477 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4478
4479 if (filter_hash)
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004480 orig_hash = &iter->ops->func_hash->filter_hash;
Steven Rostedted926f92011-05-03 13:25:24 -04004481 else
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004482 orig_hash = &iter->ops->func_hash->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004483
Steven Rostedt058e2972011-04-29 22:35:33 -04004484 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004485 old_hash = *orig_hash;
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004486 old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4487 old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
Steven Rostedt41fb61c2011-07-13 15:03:44 -04004488 ret = ftrace_hash_move(iter->ops, filter_hash,
4489 orig_hash, iter->hash);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004490 if (!ret) {
Steven Rostedt (Red Hat)7485058e2015-01-13 14:03:38 -05004491 ftrace_ops_update_code(iter->ops, &old_hash_ops);
Steven Rostedt (Red Hat)3296fc42014-07-24 15:33:41 -04004492 free_ftrace_hash_rcu(old_hash);
4493 }
Steven Rostedt058e2972011-04-29 22:35:33 -04004494 mutex_unlock(&ftrace_lock);
4495 }
Masami Hiramatsu3f2367b2013-05-09 14:44:21 +09004496
Steven Rostedt (Red Hat)33b7f992014-08-15 17:23:02 -04004497 mutex_unlock(&iter->ops->func_hash->regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04004498 free_ftrace_hash(iter->hash);
4499 kfree(iter);
Steven Rostedt058e2972011-04-29 22:35:33 -04004500
Steven Rostedt5072c592008-05-12 21:20:43 +02004501 return 0;
4502}
4503
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004504static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02004505 .open = ftrace_avail_open,
4506 .read = seq_read,
4507 .llseek = seq_lseek,
Li Zefan3be04b42009-08-17 16:54:03 +08004508 .release = seq_release_private,
Steven Rostedt5072c592008-05-12 21:20:43 +02004509};
4510
Steven Rostedt647bcd02011-05-03 14:39:21 -04004511static const struct file_operations ftrace_enabled_fops = {
4512 .open = ftrace_enabled_open,
4513 .read = seq_read,
4514 .llseek = seq_lseek,
4515 .release = seq_release_private,
4516};
4517
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004518static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02004519 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08004520 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02004521 .write = ftrace_filter_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004522 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004523 .release = ftrace_regex_release,
Steven Rostedt5072c592008-05-12 21:20:43 +02004524};
4525
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004526static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04004527 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08004528 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04004529 .write = ftrace_notrace_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004530 .llseek = tracing_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04004531 .release = ftrace_regex_release,
Steven Rostedt41c52c02008-05-22 11:46:33 -04004532};
4533
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004534#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4535
4536static DEFINE_MUTEX(graph_lock);
4537
4538int ftrace_graph_count;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004539int ftrace_graph_notrace_count;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004540unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004541unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004542
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004543struct ftrace_graph_data {
4544 unsigned long *table;
4545 size_t size;
4546 int *count;
4547 const struct seq_operations *seq_ops;
4548};
4549
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004550static void *
Li Zefan85951842009-06-24 09:54:00 +08004551__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004552{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004553 struct ftrace_graph_data *fgd = m->private;
4554
4555 if (*pos >= *fgd->count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004556 return NULL;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004557 return &fgd->table[*pos];
Li Zefan85951842009-06-24 09:54:00 +08004558}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004559
Li Zefan85951842009-06-24 09:54:00 +08004560static void *
4561g_next(struct seq_file *m, void *v, loff_t *pos)
4562{
4563 (*pos)++;
4564 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004565}
4566
4567static void *g_start(struct seq_file *m, loff_t *pos)
4568{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004569 struct ftrace_graph_data *fgd = m->private;
4570
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004571 mutex_lock(&graph_lock);
4572
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004573 /* Nothing to show; tell g_show to print "all functions enabled" */
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004574 if (!*fgd->count && !*pos)
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004575 return (void *)1;
4576
Li Zefan85951842009-06-24 09:54:00 +08004577 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004578}
4579
4580static void g_stop(struct seq_file *m, void *p)
4581{
4582 mutex_unlock(&graph_lock);
4583}
4584
4585static int g_show(struct seq_file *m, void *v)
4586{
4587 unsigned long *ptr = v;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004588
4589 if (!ptr)
4590 return 0;
4591
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004592 if (ptr == (unsigned long *)1) {
Namhyung Kim280d1422014-06-13 01:23:51 +09004593 struct ftrace_graph_data *fgd = m->private;
4594
4595 if (fgd->table == ftrace_graph_funcs)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004596 seq_puts(m, "#### all functions enabled ####\n");
Namhyung Kim280d1422014-06-13 01:23:51 +09004597 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004598 seq_puts(m, "#### no functions disabled ####\n");
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004599 return 0;
4600 }
4601
Steven Rostedtb375a112009-09-17 00:05:58 -04004602 seq_printf(m, "%ps\n", (void *)*ptr);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004603
4604 return 0;
4605}
4606
James Morris88e9d342009-09-22 16:43:43 -07004607static const struct seq_operations ftrace_graph_seq_ops = {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004608 .start = g_start,
4609 .next = g_next,
4610 .stop = g_stop,
4611 .show = g_show,
4612};
4613
4614static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004615__ftrace_graph_open(struct inode *inode, struct file *file,
4616 struct ftrace_graph_data *fgd)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004617{
4618 int ret = 0;
4619
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004620 mutex_lock(&graph_lock);
4621 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04004622 (file->f_flags & O_TRUNC)) {
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004623 *fgd->count = 0;
4624 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004625 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004626 mutex_unlock(&graph_lock);
4627
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004628 if (file->f_mode & FMODE_READ) {
4629 ret = seq_open(file, fgd->seq_ops);
4630 if (!ret) {
4631 struct seq_file *m = file->private_data;
4632 m->private = fgd;
4633 }
4634 } else
4635 file->private_data = fgd;
Li Zefana4ec5e02009-09-18 14:06:28 +08004636
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004637 return ret;
4638}
4639
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004640static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004641ftrace_graph_open(struct inode *inode, struct file *file)
4642{
4643 struct ftrace_graph_data *fgd;
4644
4645 if (unlikely(ftrace_disabled))
4646 return -ENODEV;
4647
4648 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4649 if (fgd == NULL)
4650 return -ENOMEM;
4651
4652 fgd->table = ftrace_graph_funcs;
4653 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4654 fgd->count = &ftrace_graph_count;
4655 fgd->seq_ops = &ftrace_graph_seq_ops;
4656
4657 return __ftrace_graph_open(inode, file, fgd);
4658}
4659
4660static int
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004661ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4662{
4663 struct ftrace_graph_data *fgd;
4664
4665 if (unlikely(ftrace_disabled))
4666 return -ENODEV;
4667
4668 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4669 if (fgd == NULL)
4670 return -ENOMEM;
4671
4672 fgd->table = ftrace_graph_notrace_funcs;
4673 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4674 fgd->count = &ftrace_graph_notrace_count;
4675 fgd->seq_ops = &ftrace_graph_seq_ops;
4676
4677 return __ftrace_graph_open(inode, file, fgd);
4678}
4679
4680static int
Li Zefan87827112009-07-23 11:29:11 +08004681ftrace_graph_release(struct inode *inode, struct file *file)
4682{
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004683 if (file->f_mode & FMODE_READ) {
4684 struct seq_file *m = file->private_data;
4685
4686 kfree(m->private);
Li Zefan87827112009-07-23 11:29:11 +08004687 seq_release(inode, file);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004688 } else {
4689 kfree(file->private_data);
4690 }
4691
Li Zefan87827112009-07-23 11:29:11 +08004692 return 0;
4693}
4694
4695static int
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004696ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004697{
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004698 struct ftrace_glob func_g;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004699 struct dyn_ftrace *rec;
4700 struct ftrace_page *pg;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004701 int fail = 1;
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004702 int not;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004703 bool exists;
4704 int i;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004705
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004706 /* decode regex */
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004707 func_g.type = filter_parse_regex(buffer, strlen(buffer),
4708 &func_g.search, &not);
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004709 if (!not && *idx >= size)
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004710 return -EBUSY;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004711
Dmitry Safonov3ba00922015-09-29 19:46:14 +03004712 func_g.len = strlen(func_g.search);
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004713
Steven Rostedt52baf112009-02-14 01:15:39 -05004714 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04004715
4716 if (unlikely(ftrace_disabled)) {
4717 mutex_unlock(&ftrace_lock);
4718 return -ENODEV;
4719 }
4720
Steven Rostedt265c8312009-02-13 12:43:56 -05004721 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004722
Steven Rostedt (Red Hat)546fece2016-11-14 16:31:49 -05004723 if (rec->flags & FTRACE_FL_DISABLED)
4724 continue;
4725
Dmitry Safonov0b507e12015-09-29 19:46:15 +03004726 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004727 /* if it is in the array */
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004728 exists = false;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004729 for (i = 0; i < *idx; i++) {
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01004730 if (array[i] == rec->ip) {
4731 exists = true;
Steven Rostedt265c8312009-02-13 12:43:56 -05004732 break;
4733 }
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004734 }
4735
4736 if (!not) {
4737 fail = 0;
4738 if (!exists) {
4739 array[(*idx)++] = rec->ip;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004740 if (*idx >= size)
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004741 goto out;
4742 }
4743 } else {
4744 if (exists) {
4745 array[i] = array[--(*idx)];
4746 array[*idx] = 0;
4747 fail = 0;
4748 }
4749 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004750 }
Steven Rostedt265c8312009-02-13 12:43:56 -05004751 } while_for_each_ftrace_rec();
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004752out:
Steven Rostedt52baf112009-02-14 01:15:39 -05004753 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004754
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004755 if (fail)
4756 return -EINVAL;
4757
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004758 return 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004759}
4760
4761static ssize_t
4762ftrace_graph_write(struct file *file, const char __user *ubuf,
4763 size_t cnt, loff_t *ppos)
4764{
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004765 struct trace_parser parser;
Namhyung Kim6a101082013-10-14 17:24:25 +09004766 ssize_t read, ret = 0;
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004767 struct ftrace_graph_data *fgd = file->private_data;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004768
Li Zefanc7c6b1f2010-02-10 15:43:04 +08004769 if (!cnt)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004770 return 0;
4771
Namhyung Kim6a101082013-10-14 17:24:25 +09004772 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4773 return -ENOMEM;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004774
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004775 read = trace_get_user(&parser, ubuf, cnt, ppos);
4776
Li Zefan4ba79782009-09-22 13:52:20 +08004777 if (read >= 0 && trace_parser_loaded((&parser))) {
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004778 parser.buffer[parser.idx] = 0;
4779
Namhyung Kim6a101082013-10-14 17:24:25 +09004780 mutex_lock(&graph_lock);
4781
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004782 /* we allow only one expression at a time */
Namhyung Kimfaf982a2013-10-14 17:24:24 +09004783 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4784 parser.buffer);
Namhyung Kim6a101082013-10-14 17:24:25 +09004785
4786 mutex_unlock(&graph_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004787 }
4788
Namhyung Kim6a101082013-10-14 17:24:25 +09004789 if (!ret)
4790 ret = read;
Li Zefan1eb90f12009-09-22 13:52:57 +08004791
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02004792 trace_parser_put(&parser);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004793
4794 return ret;
4795}
4796
4797static const struct file_operations ftrace_graph_fops = {
Li Zefan87827112009-07-23 11:29:11 +08004798 .open = ftrace_graph_open,
4799 .read = seq_read,
4800 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004801 .llseek = tracing_lseek,
Li Zefan87827112009-07-23 11:29:11 +08004802 .release = ftrace_graph_release,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004803};
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004804
4805static const struct file_operations ftrace_graph_notrace_fops = {
4806 .open = ftrace_graph_notrace_open,
4807 .read = seq_read,
4808 .write = ftrace_graph_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004809 .llseek = tracing_lseek,
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004810 .release = ftrace_graph_release,
4811};
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004812#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4813
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05004814void ftrace_create_filter_files(struct ftrace_ops *ops,
4815 struct dentry *parent)
4816{
4817
4818 trace_create_file("set_ftrace_filter", 0644, parent,
4819 ops, &ftrace_filter_fops);
4820
4821 trace_create_file("set_ftrace_notrace", 0644, parent,
4822 ops, &ftrace_notrace_fops);
4823}
4824
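/*
 * Resulting layout sketch, assuming the usual tracefs mount points
 * (either may apply depending on the system):
 *
 *	/sys/kernel/tracing/set_ftrace_filter
 *	/sys/kernel/tracing/set_ftrace_notrace
 *	/sys/kernel/debug/tracing/set_ftrace_filter (via debugfs)
 *
 * Callers may pass a different parent dentry, in which case the two
 * files appear under that directory instead.
 */
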
4825/*
4826 * The name "destroy_filter_files" is really a misnomer. Although
4827 * it may actually delete the files in the future, for now it is
4828 * really intended to make sure the ops passed in are disabled
4829 * and that when this function returns, the caller is free to
4830 * free the ops.
4831 *
4832 * The "destroy" name is only to match the "create" name that this
4833 * should be paired with.
4834 */
4835void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4836{
4837 mutex_lock(&ftrace_lock);
4838 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4839 ftrace_shutdown(ops, 0);
4840 ops->flags |= FTRACE_OPS_FL_DELETED;
Steven Rostedt (VMware)326c9e12018-12-10 23:58:01 -05004841 ftrace_free_filter(ops);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05004842 mutex_unlock(&ftrace_lock);
4843}
4844
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05004845static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02004846{
Steven Rostedt5072c592008-05-12 21:20:43 +02004847
Frederic Weisbecker5452af62009-03-27 00:25:38 +01004848 trace_create_file("available_filter_functions", 0444,
4849 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02004850
Steven Rostedt647bcd02011-05-03 14:39:21 -04004851 trace_create_file("enabled_functions", 0444,
4852 d_tracer, NULL, &ftrace_enabled_fops);
4853
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05004854 ftrace_create_filter_files(&global_ops, d_tracer);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04004855
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004856#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01004857 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004858 NULL,
4859 &ftrace_graph_fops);
Namhyung Kim29ad23b2013-10-14 17:24:26 +09004860 trace_create_file("set_graph_notrace", 0444, d_tracer,
4861 NULL,
4862 &ftrace_graph_notrace_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05004863#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4864
Steven Rostedt5072c592008-05-12 21:20:43 +02004865 return 0;
4866}
4867
Steven Rostedt9fd49322012-04-24 22:32:06 -04004868static int ftrace_cmp_ips(const void *a, const void *b)
Steven Rostedt68950612011-12-16 17:06:45 -05004869{
Steven Rostedt9fd49322012-04-24 22:32:06 -04004870 const unsigned long *ipa = a;
4871 const unsigned long *ipb = b;
Steven Rostedt68950612011-12-16 17:06:45 -05004872
Steven Rostedt9fd49322012-04-24 22:32:06 -04004873 if (*ipa > *ipb)
4874 return 1;
4875 if (*ipa < *ipb)
4876 return -1;
4877 return 0;
4878}
4879
Sami Tolvanen7bd125e2017-06-16 12:52:57 -07004880static int __norecordmcount ftrace_process_locs(struct module *mod,
4881 unsigned long *start,
4882 unsigned long *end)
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004883{
Steven Rostedt706c81f2012-04-24 23:45:26 -04004884 struct ftrace_page *start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05004885 struct ftrace_page *pg;
Steven Rostedt706c81f2012-04-24 23:45:26 -04004886 struct dyn_ftrace *rec;
Steven Rostedta7900872011-12-16 16:23:44 -05004887 unsigned long count;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004888 unsigned long *p;
4889 unsigned long addr;
Steven Rostedt4376cac2011-06-24 23:28:13 -04004890 unsigned long flags = 0; /* Shut up gcc */
Steven Rostedta7900872011-12-16 16:23:44 -05004891 int ret = -ENOMEM;
4892
4893 count = end - start;
4894
4895 if (!count)
4896 return 0;
4897
Steven Rostedt9fd49322012-04-24 22:32:06 -04004898 sort(start, count, sizeof(*start),
Rasmus Villemoes6db02902015-09-09 23:27:02 +02004899 ftrace_cmp_ips, NULL);
Steven Rostedt9fd49322012-04-24 22:32:06 -04004900
Steven Rostedt706c81f2012-04-24 23:45:26 -04004901 start_pg = ftrace_allocate_pages(count);
4902 if (!start_pg)
Steven Rostedta7900872011-12-16 16:23:44 -05004903 return -ENOMEM;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004904
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004905 mutex_lock(&ftrace_lock);
Steven Rostedta7900872011-12-16 16:23:44 -05004906
Steven Rostedt32082302011-12-16 14:42:37 -05004907 /*
4908 * Core and each module needs their own pages, as
4909 * modules will free them when they are removed.
4910 * Force a new page to be allocated for modules.
4911 */
Steven Rostedta7900872011-12-16 16:23:44 -05004912 if (!mod) {
4913 WARN_ON(ftrace_pages || ftrace_pages_start);
4914 /* First initialization */
Steven Rostedt706c81f2012-04-24 23:45:26 -04004915 ftrace_pages = ftrace_pages_start = start_pg;
Steven Rostedta7900872011-12-16 16:23:44 -05004916 } else {
Steven Rostedt32082302011-12-16 14:42:37 -05004917 if (!ftrace_pages)
Steven Rostedta7900872011-12-16 16:23:44 -05004918 goto out;
Steven Rostedt32082302011-12-16 14:42:37 -05004919
Steven Rostedta7900872011-12-16 16:23:44 -05004920 if (WARN_ON(ftrace_pages->next)) {
4921 /* Hmm, we have free pages? */
4922 while (ftrace_pages->next)
4923 ftrace_pages = ftrace_pages->next;
Steven Rostedt32082302011-12-16 14:42:37 -05004924 }
Steven Rostedta7900872011-12-16 16:23:44 -05004925
Steven Rostedt706c81f2012-04-24 23:45:26 -04004926 ftrace_pages->next = start_pg;
Steven Rostedt32082302011-12-16 14:42:37 -05004927 }
4928
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004929 p = start;
Steven Rostedt706c81f2012-04-24 23:45:26 -04004930 pg = start_pg;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004931 while (p < end) {
4932 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08004933 /*
4934 * Some architecture linkers will pad between
4935 * the different mcount_loc sections of different
4936 * object files to satisfy alignments.
4937 * Skip any NULL pointers.
4938 */
4939 if (!addr)
4940 continue;
Steven Rostedt706c81f2012-04-24 23:45:26 -04004941
4942 if (pg->index == pg->size) {
4943 /* We should have allocated enough */
4944 if (WARN_ON(!pg->next))
4945 break;
4946 pg = pg->next;
4947 }
4948
4949 rec = &pg->records[pg->index++];
4950 rec->ip = addr;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004951 }
4952
Steven Rostedt706c81f2012-04-24 23:45:26 -04004953 /* We should have used all pages */
4954 WARN_ON(pg->next);
4955
4956 /* Assign the last page to ftrace_pages */
4957 ftrace_pages = pg;
4958
Steven Rostedta4f18ed2011-06-07 09:26:46 -04004959 /*
Steven Rostedt4376cac2011-06-24 23:28:13 -04004960 * We only need to disable interrupts on start up
4961 * because we are modifying code that an interrupt
4962 * may execute, and the modification is not atomic.
4963 * But for modules, nothing runs the code we modify
4964 * until we are finished with it, and there's no
4965 * reason to cause large interrupt latencies while we do it.
Steven Rostedta4f18ed2011-06-07 09:26:46 -04004966 */
Steven Rostedt4376cac2011-06-24 23:28:13 -04004967 if (!mod)
4968 local_irq_save(flags);
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01004969 ftrace_update_code(mod, start_pg);
Steven Rostedt4376cac2011-06-24 23:28:13 -04004970 if (!mod)
4971 local_irq_restore(flags);
Steven Rostedta7900872011-12-16 16:23:44 -05004972 ret = 0;
4973 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004974 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004975
Steven Rostedta7900872011-12-16 16:23:44 -05004976 return ret;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04004977}
4978
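/*
 * Input sketch, under the usual build assumptions: the linker gathers
 * one address per mcount/fentry call site into the __mcount_loc
 * section, so the boot path below ends up calling
 *
 *	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 *
 * with "end - start" entries, possibly containing NULLs from section
 * padding that the record loop above skips.
 */
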
Steven Rostedt93eb6772009-04-15 13:24:06 -04004979#ifdef CONFIG_MODULES
Steven Rostedt32082302011-12-16 14:42:37 -05004980
4981#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4982
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05004983static int referenced_filters(struct dyn_ftrace *rec)
4984{
4985 struct ftrace_ops *ops;
4986 int cnt = 0;
4987
4988 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
4989 if (ops_references_rec(ops, rec))
4990 cnt++;
4991 }
4992
4993 return cnt;
4994}
4995
jolsa@redhat.come7247a12009-10-07 19:00:35 +02004996void ftrace_release_mod(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04004997{
4998 struct dyn_ftrace *rec;
Steven Rostedt32082302011-12-16 14:42:37 -05004999 struct ftrace_page **last_pg;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005000 struct ftrace_page *pg;
Steven Rostedta7900872011-12-16 16:23:44 -05005001 int order;
Steven Rostedt93eb6772009-04-15 13:24:06 -04005002
Steven Rostedt93eb6772009-04-15 13:24:06 -04005003 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04005004
5005 if (ftrace_disabled)
5006 goto out_unlock;
5007
Steven Rostedt32082302011-12-16 14:42:37 -05005008 /*
5009 * Each module has its own ftrace_pages, remove
5010 * them from the list.
5011 */
5012 last_pg = &ftrace_pages_start;
5013 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5014 rec = &pg->records[0];
jolsa@redhat.come7247a12009-10-07 19:00:35 +02005015 if (within_module_core(rec->ip, mod)) {
Steven Rostedt93eb6772009-04-15 13:24:06 -04005016 /*
Steven Rostedt32082302011-12-16 14:42:37 -05005017 * As core pages are first, the first
5018 * page should never be a module page.
Steven Rostedt93eb6772009-04-15 13:24:06 -04005019 */
Steven Rostedt32082302011-12-16 14:42:37 -05005020 if (WARN_ON(pg == ftrace_pages_start))
5021 goto out_unlock;
5022
5023 /* Check if we are deleting the last page */
5024 if (pg == ftrace_pages)
5025 ftrace_pages = next_to_ftrace_page(last_pg);
5026
5027 *last_pg = pg->next;
Steven Rostedta7900872011-12-16 16:23:44 -05005028 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5029 free_pages((unsigned long)pg->records, order);
5030 kfree(pg);
Steven Rostedt32082302011-12-16 14:42:37 -05005031 } else
5032 last_pg = &pg->next;
5033 }
Steven Rostedt45a4a232011-04-21 23:16:46 -04005034 out_unlock:
Steven Rostedt93eb6772009-04-15 13:24:06 -04005035 mutex_unlock(&ftrace_lock);
5036}
5037
Jessica Yu7dcd1822016-02-16 17:32:33 -05005038void ftrace_module_enable(struct module *mod)
Steven Rostedt (Red Hat)b7ffffb2016-01-07 15:40:01 -05005039{
5040 struct dyn_ftrace *rec;
5041 struct ftrace_page *pg;
5042
5043 mutex_lock(&ftrace_lock);
5044
5045 if (ftrace_disabled)
5046 goto out_unlock;
5047
5048 /*
5049 * If the tracing is enabled, go ahead and enable the record.
5050 *
5051 * The reason not to enable the record immediately is the
5052 * inherent check of ftrace_make_nop/ftrace_make_call for
5053 * correct previous instructions. Doing the NOP conversion
5054 * first puts the module into the correct state, thus
5055 * passing the ftrace_make_call check.
5056 *
5057 * We also delay this until after the module code has already set
5058 * the text to read-only, as we then need to set it back to
5059 * read-write so that we can modify the text.
5060 */
5061 if (ftrace_start_up)
5062 ftrace_arch_code_modify_prepare();
5063
5064 do_for_each_ftrace_rec(pg, rec) {
5065 int cnt;
5066 /*
5067 * do_for_each_ftrace_rec() is a double loop.
5068 * module text shares the pg. If a record is
5069 * not part of this module, then skip this pg,
5070 * which the "break" will do.
5071 */
5072 if (!within_module_core(rec->ip, mod))
5073 break;
5074
5075 cnt = 0;
5076
5077 /*
5078 * When adding a module, we need to check if tracers are
5079 * currently enabled and if they are, and can trace this record,
5080 * we need to enable the module functions as well as update the
5081 * reference counts for those function records.
5082 */
5083 if (ftrace_start_up)
5084 cnt += referenced_filters(rec);
5085
5086 /* This clears FTRACE_FL_DISABLED */
5087 rec->flags = cnt;
5088
5089 if (ftrace_start_up && cnt) {
5090 int failed = __ftrace_replace_code(rec, 1);
5091 if (failed) {
5092 ftrace_bug(failed, rec);
5093 goto out_loop;
5094 }
5095 }
5096
5097 } while_for_each_ftrace_rec();
5098
5099 out_loop:
5100 if (ftrace_start_up)
5101 ftrace_arch_code_modify_post_process();
5102
5103 out_unlock:
5104 mutex_unlock(&ftrace_lock);
5105}
5106
Steven Rostedt (Red Hat)a949ae52014-04-24 10:40:12 -04005107void ftrace_module_init(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04005108{
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005109 if (ftrace_disabled || !mod->num_ftrace_callsites)
Abel Vesab6b71f62015-12-02 15:39:57 +01005110 return;
5111
Steven Rostedt (Red Hat)97e9b4f2015-12-23 12:12:22 -05005112 ftrace_process_locs(mod, mod->ftrace_callsites,
5113 mod->ftrace_callsites + mod->num_ftrace_callsites);
Steven Rostedt (Red Hat)8c189ea2013-02-13 15:18:38 -05005114}
Steven Rostedt93eb6772009-04-15 13:24:06 -04005115#endif /* CONFIG_MODULES */
5116
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005117void __init ftrace_init(void)
5118{
Jiri Slaby1dc43cf2014-02-24 19:59:56 +01005119 extern unsigned long __start_mcount_loc[];
5120 extern unsigned long __stop_mcount_loc[];
Jiri Slaby3a36cb12014-02-24 19:59:59 +01005121 unsigned long count, flags;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005122 int ret;
5123
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005124 local_irq_save(flags);
Jiri Slaby3a36cb12014-02-24 19:59:59 +01005125 ret = ftrace_dyn_arch_init();
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005126 local_irq_restore(flags);
Jiri Slabyaf64a7c2014-02-24 19:59:58 +01005127 if (ret)
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005128 goto failed;
5129
5130 count = __stop_mcount_loc - __start_mcount_loc;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01005131 if (!count) {
5132 pr_info("ftrace: No functions to be traced?\n");
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005133 goto failed;
Jiri Slabyc867ccd2014-02-24 19:59:57 +01005134 }
5135
5136 pr_info("ftrace: allocating %ld entries in %ld pages\n",
5137 count, count / ENTRIES_PER_PAGE + 1);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005138
5139 last_ftrace_enabled = ftrace_enabled = 1;
5140
Jiri Olsa5cb084b2009-10-13 16:33:53 -04005141 ret = ftrace_process_locs(NULL,
Steven Rostedt31e88902008-11-14 16:21:19 -08005142 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005143 __stop_mcount_loc);
5144
Steven Rostedt2af15d62009-05-28 13:37:24 -04005145 set_ftrace_early_filters();
5146
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005147 return;
5148 failed:
5149 ftrace_disabled = 1;
5150}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04005151
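/*
 * Background note (editor's addition): each entry in the
 * [__start_mcount_loc, __stop_mcount_loc) range walked above is the
 * address of an mcount/fentry call site. These are collected at build
 * time (see scripts/recordmcount) into a section that looks roughly
 * like:
 *
 *	.section __mcount_loc, "a"
 *	.quad	<address of the call site in some traced function>
 */
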
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04005152/* Do nothing if arch does not support this */
5153void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5154{
5155}
5156
5157static void ftrace_update_trampoline(struct ftrace_ops *ops)
5158{
Steven Rostedt (Red Hat)12cce592014-07-03 15:48:16 -04005159
5160/*
5161 * Currently there's no safe way to free a trampoline when the kernel
5162 * is configured with PREEMPT. That is because a task could be preempted
5163 * while executing on the trampoline; it may stay preempted for a long
5164 * time depending on the system load, and currently there's no way to know
5165 * when it will be off the trampoline. If the trampoline is freed
5166 * too early, then when the task runs again it will be executing on freed
5167 * memory and crash.
5168 */
5169#ifdef CONFIG_PREEMPT
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04005170 /* Currently, only non dynamic ops can have a trampoline */
5171 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
5172 return;
Steven Rostedt (Red Hat)12cce592014-07-03 15:48:16 -04005173#endif
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04005174
5175 arch_ftrace_update_trampoline(ops);
5176}
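
/*
 * Illustrative sketch (editor's addition): an architecture overrides
 * the __weak stub above from its own code (x86 does this in
 * arch/x86/kernel/ftrace.c) to allocate or repatch an ops-specific
 * trampoline. Body elided.
 */
#if 0
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	/* create ops->trampoline if needed, else repatch its call target */
}
#endif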
5177
Steven Rostedt3d083392008-05-12 21:20:42 +02005178#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01005179
Steven Rostedt2b499382011-05-03 22:49:52 -04005180static struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04005181 .func = ftrace_stub,
Steven Rostedt (Red Hat)e3eea142015-07-24 10:38:12 -04005182 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5183 FTRACE_OPS_FL_INITIALIZED |
5184 FTRACE_OPS_FL_PID,
Steven Rostedtbd69c302011-05-03 21:55:54 -04005185};
5186
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01005187static int __init ftrace_nodyn_init(void)
5188{
5189 ftrace_enabled = 1;
5190 return 0;
5191}
Steven Rostedt6f415672012-10-05 12:13:07 -04005192core_initcall(ftrace_nodyn_init);
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01005193
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005194static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005195static inline void ftrace_startup_enable(int command) { }
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04005196static inline void ftrace_startup_all(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05005197/* Keep as macros so we do not need to define the commands */
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05005198# define ftrace_startup(ops, command) \
5199 ({ \
5200 int ___ret = __register_ftrace_function(ops); \
5201 if (!___ret) \
5202 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
5203 ___ret; \
Steven Rostedt3b6cfdb2011-05-23 15:33:49 -04005204 })
Steven Rostedt (Red Hat)1fcc1552014-02-19 15:12:18 -05005205# define ftrace_shutdown(ops, command) \
5206 ({ \
5207 int ___ret = __unregister_ftrace_function(ops); \
5208 if (!___ret) \
5209 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
5210 ___ret; \
5211 })
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05005212
Ingo Molnarc7aafc52008-05-12 21:20:45 +02005213# define ftrace_startup_sysctl() do { } while (0)
5214# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04005215
5216static inline int
Steven Rostedt (Red Hat)195a8af2013-07-23 22:06:15 -04005217ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04005218{
5219 return 1;
5220}
5221
Steven Rostedt (Red Hat)f3bea492014-07-02 23:23:31 -04005222static void ftrace_update_trampoline(struct ftrace_ops *ops)
5223{
5224}
5225
Steven Rostedt3d083392008-05-12 21:20:42 +02005226#endif /* CONFIG_DYNAMIC_FTRACE */
5227
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05005228__init void ftrace_init_global_array_ops(struct trace_array *tr)
5229{
5230 tr->ops = &global_ops;
5231 tr->ops->private = tr;
5232}
5233
5234void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5235{
5236 /* If we filter on pids, update to use the pid function */
5237 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5238 if (WARN_ON(tr->ops->func != ftrace_stub))
5239 pr_warn("ftrace ops had %pS for function\n",
5240 tr->ops->func);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05005241 }
5242 tr->ops->func = func;
5243 tr->ops->private = tr;
5244}
5245
5246void ftrace_reset_array_ops(struct trace_array *tr)
5247{
5248 tr->ops->func = ftrace_stub;
5249}
5250
Masami Hiramatsu25f467d2019-02-24 01:50:20 +09005251static nokprobe_inline void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005252__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04005253 struct ftrace_ops *ignored, struct pt_regs *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04005254{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04005255 struct ftrace_ops *op;
Steven Rostedtedc15ca2012-11-02 17:47:21 -04005256 int bit;
Steven Rostedtb8489142011-05-04 09:27:52 -04005257
Steven Rostedtedc15ca2012-11-02 17:47:21 -04005258 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5259 if (bit < 0)
5260 return;
Steven Rostedtc29f1222012-11-02 17:17:59 -04005261
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04005262 /*
5263 * Some of the ops may be dynamically allocated,
5264 * they must be freed after a synchronize_sched().
5265 */
5266 preempt_disable_notrace();
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05005267
Steven Rostedt0a016402012-11-02 17:03:03 -04005268 do_for_each_ftrace_op(op, ftrace_ops_list) {
Steven Rostedt (Red Hat)ba27f2b2015-11-30 17:23:39 -05005269 /*
5270 * Check the following for each ops before calling their func:
5271 * if RCU flag is set, then rcu_is_watching() must be true
5272 * if PER_CPU is set, then ftrace_function_local_disabled()
5273 * must be false
5274 * Otherwise test if the ip matches the ops filter
5275 *
5276 * If any of the above fails then the op->func() is not executed.
5277 */
5278 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
5279 (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
5280 !ftrace_function_local_disabled(op)) &&
5281 ftrace_ops_test(op, ip, regs)) {
5282
Steven Rostedt (Red Hat)1d48d592014-06-25 11:54:03 -04005283 if (FTRACE_WARN_ON(!op->func)) {
5284 pr_warn("op=%p %pS\n", op, op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05005285 goto out;
5286 }
Steven Rostedta1e2e312011-08-09 12:50:46 -04005287 op->func(ip, parent_ip, op, regs);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05005288 }
Steven Rostedt0a016402012-11-02 17:03:03 -04005289 } while_for_each_ftrace_op(op);
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05005290out:
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04005291 preempt_enable_notrace();
Steven Rostedtedc15ca2012-11-02 17:47:21 -04005292 trace_clear_recursion(bit);
Steven Rostedtb8489142011-05-04 09:27:52 -04005293}
5294
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005295/*
5296 * Some archs only support passing ip and parent_ip. Even though
5297 * the list function ignores the op parameter, we do not want any
5298 * C side effects from calling a function without the caller
5299 * passing a third parameter.
Steven Rostedta1e2e312011-08-09 12:50:46 -04005300 * Archs are expected to support both regs and ftrace_ops at the same time.
5301 * If they support ftrace_ops, it is assumed they support regs.
5302 * If callbacks want to use regs, they must either check for regs
Masami Hiramatsu06aeaae2012-09-28 17:15:17 +09005303 * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5304 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
Steven Rostedta1e2e312011-08-09 12:50:46 -04005305 * An architecture can pass partial regs with ftrace_ops and still
Li Binb8ec3302015-11-30 18:23:36 +08005306 * set ARCH_SUPPORTS_FTRACE_OPS.
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005307 */
5308#if ARCH_SUPPORTS_FTRACE_OPS
5309static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04005310 struct ftrace_ops *op, struct pt_regs *regs)
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005311{
Steven Rostedta1e2e312011-08-09 12:50:46 -04005312 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005313}
Masami Hiramatsu25f467d2019-02-24 01:50:20 +09005314NOKPROBE_SYMBOL(ftrace_ops_list_func);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005315#else
Sami Tolvanenc2f9bce2018-05-10 14:56:41 -07005316static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip,
5317 struct ftrace_ops *op, struct pt_regs *regs)
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005318{
Steven Rostedta1e2e312011-08-09 12:50:46 -04005319 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005320}
Masami Hiramatsu25f467d2019-02-24 01:50:20 +09005321NOKPROBE_SYMBOL(ftrace_ops_no_ops);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04005322#endif
5323
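/*
 * Illustrative sketch (editor's addition) of a callback that follows
 * the rules above: since regs may be NULL unless the arch saves them
 * (and FTRACE_OPS_FL_SAVE_REGS was requested), check before using it.
 */
#if 0
static void notrace example_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	if (regs)
		/* full (or partial) register state is available here */
		pr_debug("traced %pS with regs\n", (void *)ip);
	else
		pr_debug("traced %pS without regs\n", (void *)ip);
}
#endif
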
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005324/*
5325 * If there's only one function registered but it does not support
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005326 * recursion, needs RCU protection and/or requires per cpu handling, then
5327 * this function will be called by the mcount trampoline.
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005328 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005329static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005330 struct ftrace_ops *op, struct pt_regs *regs)
5331{
5332 int bit;
5333
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005334 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
5335 return;
5336
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005337 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5338 if (bit < 0)
5339 return;
5340
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005341 preempt_disable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005342
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005343 if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
5344 !ftrace_function_local_disabled(op)) {
5345 op->func(ip, parent_ip, op, regs);
5346 }
5347
5348 preempt_enable_notrace();
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005349 trace_clear_recursion(bit);
5350}
Masami Hiramatsu25f467d2019-02-24 01:50:20 +09005351NOKPROBE_SYMBOL(ftrace_ops_assist_func);
Steven Rostedt (Red Hat)f1ff6342014-07-22 20:16:57 -04005352
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04005353/**
5354 * ftrace_ops_get_func - get the function a trampoline should call
5355 * @ops: the ops to get the function for
5356 *
5357 * Normally the mcount trampoline will call the ops->func, but there
5358 * are times that it should not. For example, if the ops does not
5359 * have its own recursion protection, then it should call the
5360 * ftrace_ops_assist_func() instead.
5361 *
5362 * Returns the function that the trampoline should call for @ops.
5363 */
5364ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5365{
5366 /*
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005367 * If the function does not handle recursion, needs to be RCU safe,
5368 * or does per cpu logic, then we need to call the assist handler.
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04005369 */
Steven Rostedt (Red Hat)c68c0fa2015-12-01 13:28:16 -05005370 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
5371 ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
5372 return ftrace_ops_assist_func;
Steven Rostedt (Red Hat)87354052014-07-22 20:41:42 -04005373
5374 return ops->func;
5375}
5376
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005377static void
5378ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
5379 struct task_struct *prev, struct task_struct *next)
Steven Rostedte32d8952008-12-04 00:26:41 -05005380{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005381 struct trace_array *tr = data;
5382 struct trace_pid_list *pid_list;
5383
5384 pid_list = rcu_dereference_sched(tr->function_pids);
5385
5386 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
5387 trace_ignore_this_task(pid_list, next));
5388}
5389
5390static void clear_ftrace_pids(struct trace_array *tr)
5391{
5392 struct trace_pid_list *pid_list;
Steven Rostedte32d8952008-12-04 00:26:41 -05005393 int cpu;
5394
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005395 pid_list = rcu_dereference_protected(tr->function_pids,
5396 lockdep_is_held(&ftrace_lock));
5397 if (!pid_list)
5398 return;
5399
5400 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5401
5402 for_each_possible_cpu(cpu)
5403 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
5404
5405 rcu_assign_pointer(tr->function_pids, NULL);
5406
5407 /* Wait until no user is still using the pid filtering */
5408 synchronize_sched();
5409
5410 trace_free_pid_list(pid_list);
Steven Rostedte32d8952008-12-04 00:26:41 -05005411}
5412
Namhyung Kim7da0f8e2017-04-17 11:44:27 +09005413void ftrace_clear_pids(struct trace_array *tr)
5414{
5415 mutex_lock(&ftrace_lock);
5416
5417 clear_ftrace_pids(tr);
5418
5419 mutex_unlock(&ftrace_lock);
5420}
5421
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005422static void ftrace_pid_reset(struct trace_array *tr)
Steven Rostedte32d8952008-12-04 00:26:41 -05005423{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005424 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005425 clear_ftrace_pids(tr);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005426
5427 ftrace_update_pid_func();
Steven Rostedt (Red Hat)e1effa02014-08-05 17:19:38 -04005428 ftrace_startup_all(0);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005429
5430 mutex_unlock(&ftrace_lock);
5431}
5432
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005433/* Greater than any max PID */
5434#define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1)
5435
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005436static void *fpid_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005437 __acquires(RCU)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005438{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005439 struct trace_pid_list *pid_list;
5440 struct trace_array *tr = m->private;
5441
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005442 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005443 rcu_read_lock_sched();
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005444
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005445 pid_list = rcu_dereference_sched(tr->function_pids);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005446
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005447 if (!pid_list)
5448 return !(*pos) ? FTRACE_NO_PIDS : NULL;
5449
5450 return trace_pid_start(pid_list, pos);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005451}
5452
5453static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5454{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005455 struct trace_array *tr = m->private;
5456 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
5457
5458 if (v == FTRACE_NO_PIDS)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005459 return NULL;
5460
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005461 return trace_pid_next(pid_list, v, pos);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005462}
5463
5464static void fpid_stop(struct seq_file *m, void *p)
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005465 __releases(RCU)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005466{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005467 rcu_read_unlock_sched();
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005468 mutex_unlock(&ftrace_lock);
5469}
5470
5471static int fpid_show(struct seq_file *m, void *v)
5472{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005473 if (v == FTRACE_NO_PIDS) {
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005474 seq_puts(m, "no pid\n");
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005475 return 0;
5476 }
5477
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005478 return trace_pid_show(m, v);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005479}
5480
5481static const struct seq_operations ftrace_pid_sops = {
5482 .start = fpid_start,
5483 .next = fpid_next,
5484 .stop = fpid_stop,
5485 .show = fpid_show,
5486};
5487
5488static int
5489ftrace_pid_open(struct inode *inode, struct file *file)
5490{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005491 struct trace_array *tr = inode->i_private;
5492 struct seq_file *m;
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005493 int ret = 0;
5494
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005495 if (trace_array_get(tr) < 0)
5496 return -ENODEV;
5497
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005498 if ((file->f_mode & FMODE_WRITE) &&
5499 (file->f_flags & O_TRUNC))
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005500 ftrace_pid_reset(tr);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005501
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005502 ret = seq_open(file, &ftrace_pid_sops);
5503 if (ret < 0) {
5504 trace_array_put(tr);
5505 } else {
5506 m = file->private_data;
5507 /* copy tr over to seq ops */
5508 m->private = tr;
5509 }
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005510
5511 return ret;
5512}
5513
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005514static void ignore_task_cpu(void *data)
5515{
5516 struct trace_array *tr = data;
5517 struct trace_pid_list *pid_list;
5518
5519 /*
5520 * This function is called by on_each_cpu() while
5521 * ftrace_lock is held.
5522 */
5523 pid_list = rcu_dereference_protected(tr->function_pids,
5524 mutex_is_locked(&ftrace_lock));
5525
5526 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
5527 trace_ignore_this_task(pid_list, current));
5528}
5529
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005530static ssize_t
5531ftrace_pid_write(struct file *filp, const char __user *ubuf,
5532 size_t cnt, loff_t *ppos)
5533{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005534 struct seq_file *m = filp->private_data;
5535 struct trace_array *tr = m->private;
5536 struct trace_pid_list *filtered_pids = NULL;
5537 struct trace_pid_list *pid_list;
5538 ssize_t ret;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005539
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005540 if (!cnt)
5541 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005542
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005543 mutex_lock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005544
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005545 filtered_pids = rcu_dereference_protected(tr->function_pids,
5546 lockdep_is_held(&ftrace_lock));
5547
5548 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
5549 if (ret < 0)
5550 goto out;
5551
5552 rcu_assign_pointer(tr->function_pids, pid_list);
5553
5554 if (filtered_pids) {
5555 synchronize_sched();
5556 trace_free_pid_list(filtered_pids);
5557 } else if (pid_list) {
5558 /* Register a probe to set whether to ignore the tracing of a task */
5559 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5560 }
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005561
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005562 /*
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005563 * Ignoring of pids is done at task switch. But we have to
5564 * check for those tasks that are currently running.
5565 * Always do this in case a pid was appended or removed.
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005566 */
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005567 on_each_cpu(ignore_task_cpu, tr, 1);
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005568
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005569 ftrace_update_pid_func();
5570 ftrace_startup_all(0);
5571 out:
5572 mutex_unlock(&ftrace_lock);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005573
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005574 if (ret > 0)
5575 *ppos += ret;
Steven Rostedt978f3a42008-12-04 00:26:40 -05005576
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005577 return ret;
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005578}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005579
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005580static int
5581ftrace_pid_release(struct inode *inode, struct file *file)
5582{
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005583 struct trace_array *tr = inode->i_private;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005584
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005585 trace_array_put(tr);
5586
5587 return seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005588}
5589
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005590static const struct file_operations ftrace_pid_fops = {
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005591 .open = ftrace_pid_open,
5592 .write = ftrace_pid_write,
5593 .read = seq_read,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005594 .llseek = tracing_lseek,
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04005595 .release = ftrace_pid_release,
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005596};
5597
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005598void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005599{
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005600 trace_create_file("set_ftrace_pid", 0644, d_tracer,
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04005601 tr, &ftrace_pid_fops);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005602}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05005603
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04005604void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
5605 struct dentry *d_tracer)
5606{
5607 /* Only the top level directory has the dyn_tracefs and profile */
5608 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
5609
5610 ftrace_init_dyn_tracefs(d_tracer);
5611 ftrace_profile_tracefs(d_tracer);
5612}
5613
Steven Rostedt3d083392008-05-12 21:20:42 +02005614/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04005615 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04005616 *
5617 * This function should be used by panic code. It stops ftrace
5618 * but in a not so nice way: it takes no locks, which also makes
5619 * it safe to call from an atomic section such as a panic handler.
5620 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04005621void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04005622{
5623 ftrace_disabled = 1;
5624 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04005625 clear_ftrace_function();
5626}
5627
5628/**
Steven Rostedte0a413f2011-09-29 21:26:16 -04005629 * ftrace_is_dead - Test if ftrace is dead or not.
5630 */
5631int ftrace_is_dead(void)
5632{
5633 return ftrace_disabled;
5634}
5635
5636/**
Steven Rostedt3d083392008-05-12 21:20:42 +02005637 * register_ftrace_function - register a function for profiling
5638 * @ops - ops structure that holds the function for profiling.
5639 *
5640 * Register a function to be called by all functions in the
5641 * kernel.
5642 *
5643 * Note: @ops->func and all the functions it calls must be labeled
5644 * with "notrace", otherwise it will go into a
5645 * recursive loop.
5646 */
5647int register_ftrace_function(struct ftrace_ops *ops)
5648{
Steven Rostedt45a4a232011-04-21 23:16:46 -04005649 int ret = -1;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02005650
Masami Hiramatsuf04f24fb2013-05-09 14:44:17 +09005651 ftrace_ops_init(ops);
5652
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005653 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005654
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05005655 ret = ftrace_startup(ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04005656
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005657 mutex_unlock(&ftrace_lock);
Borislav Petkov8d240dd2012-03-29 19:11:40 +02005658
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005659 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02005660}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04005661EXPORT_SYMBOL_GPL(register_ftrace_function);
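
/*
 * Minimal usage sketch (editor's addition, patterned on
 * Documentation/trace/ftrace-uses.rst). Because RECURSION_SAFE is not
 * set, ftrace_ops_get_func() above wraps the callback with recursion
 * protection automatically.
 */
#if 0
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs for every traced function that passes the ops filters */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}
#endif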
Steven Rostedt3d083392008-05-12 21:20:42 +02005662
5663/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01005664 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02005665 * @ops - ops structure that holds the function to unregister
5666 *
5667 * Unregister a function that was added to be called by ftrace profiling.
5668 */
5669int unregister_ftrace_function(struct ftrace_ops *ops)
5670{
5671 int ret;
5672
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005673 mutex_lock(&ftrace_lock);
Steven Rostedt (Red Hat)8a56d772013-11-25 20:59:46 -05005674 ret = ftrace_shutdown(ops, 0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005675 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005676
5677 return ret;
5678}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04005679EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005680
Ingo Molnare309b412008-05-12 21:20:51 +02005681int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005682ftrace_enable_sysctl(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07005683 void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005684 loff_t *ppos)
5685{
Steven Rostedt45a4a232011-04-21 23:16:46 -04005686 int ret = -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02005687
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005688 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005689
Steven Rostedt45a4a232011-04-21 23:16:46 -04005690 if (unlikely(ftrace_disabled))
5691 goto out;
5692
5693 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005694
Li Zefana32c7762009-06-26 16:55:51 +08005695 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005696 goto out;
5697
Li Zefana32c7762009-06-26 16:55:51 +08005698 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005699
5700 if (ftrace_enabled) {
5701
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005702 /* we are starting ftrace again */
Jan Kiszka5000c412013-03-26 17:53:03 +01005703 if (ftrace_ops_list != &ftrace_list_end)
5704 update_ftrace_function();
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005705
Steven Rostedt (Red Hat)524a3862015-03-06 19:55:13 -05005706 ftrace_startup_sysctl();
5707
Steven Rostedtb0fc4942008-05-12 21:20:43 +02005708 } else {
5709 /* stopping ftrace calls (just send to ftrace_stub) */
5710 ftrace_trace_function = ftrace_stub;
5711
5712 ftrace_shutdown_sysctl();
5713 }
5714
5715 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005716 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02005717 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02005718}
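
/*
 * Note (editor's addition): the handler above backs the
 * kernel.ftrace_enabled sysctl, i.e.:
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled
 *
 * turns the traced calls back into stubs without unregistering the
 * registered ftrace_ops.
 */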
Ingo Molnarf17845e2008-10-24 12:47:10 +02005719
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005720#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005721
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005722static struct ftrace_ops graph_ops = {
5723 .func = ftrace_stub,
5724 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5725 FTRACE_OPS_FL_INITIALIZED |
Steven Rostedt (Red Hat)e3eea142015-07-24 10:38:12 -04005726 FTRACE_OPS_FL_PID |
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005727 FTRACE_OPS_FL_STUB,
5728#ifdef FTRACE_GRAPH_TRAMP_ADDR
5729 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
Steven Rostedt (Red Hat)aec0be22014-11-18 21:14:11 -05005730 /* trampoline_size is only needed for dynamically allocated tramps */
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005731#endif
5732 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5733};
5734
Steven Rostedt (Red Hat)55577202015-09-29 19:06:50 -04005735void ftrace_graph_sleep_time_control(bool enable)
5736{
5737 fgraph_sleep_time = enable;
5738}
5739
5740void ftrace_graph_graph_time_control(bool enable)
5741{
5742 fgraph_graph_time = enable;
5743}
5744
Sami Tolvanenc2f9bce2018-05-10 14:56:41 -07005745void ftrace_graph_return_stub(struct ftrace_graph_ret *trace)
5746{
5747}
5748
Steven Rostedte49dc192008-12-02 23:50:05 -05005749int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5750{
5751 return 0;
5752}
5753
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01005754/* The callbacks that hook a function */
Sami Tolvanenc2f9bce2018-05-10 14:56:41 -07005755trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_return_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05005756trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005757static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005758
5759/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5760static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5761{
5762 int i;
5763 int ret = 0;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005764 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5765 struct task_struct *g, *t;
5766
5767 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5768 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5769 * sizeof(struct ftrace_ret_stack),
5770 GFP_KERNEL);
5771 if (!ret_stack_list[i]) {
5772 start = 0;
5773 end = i;
5774 ret = -ENOMEM;
5775 goto free;
5776 }
5777 }
5778
Soumya PN6112a302016-05-17 21:31:14 +05305779 read_lock(&tasklist_lock);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005780 do_each_thread(g, t) {
5781 if (start == end) {
5782 ret = -EAGAIN;
5783 goto unlock;
5784 }
5785
5786 if (t->ret_stack == NULL) {
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01005787 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005788 atomic_set(&t->trace_overrun, 0);
Steven Rostedt26c01622009-06-02 14:01:19 -04005789 t->curr_ret_stack = -1;
5790 /* Make sure the tasks see the -1 first: */
5791 smp_wmb();
5792 t->ret_stack = ret_stack_list[start++];
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005793 }
5794 } while_each_thread(g, t);
5795
5796unlock:
Soumya PN6112a302016-05-17 21:31:14 +05305797 read_unlock(&tasklist_lock);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005798free:
5799 for (i = start; i < end; i++)
5800 kfree(ret_stack_list[i]);
5801 return ret;
5802}
5803
Steven Rostedt8aef2d22009-03-24 01:10:15 -04005804static void
Peter Zijlstrac73464b2015-09-28 18:06:56 +02005805ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
Steven Rostedt38516ab2010-04-20 17:04:50 -04005806 struct task_struct *prev, struct task_struct *next)
Steven Rostedt8aef2d22009-03-24 01:10:15 -04005807{
5808 unsigned long long timestamp;
5809 int index;
5810
Steven Rostedtbe6f1642009-03-24 11:06:24 -04005811 /*
5812 * Does the user want to count the time a function was asleep?
5813 * If so, do not update the time stamps.
5814 */
Steven Rostedt (Red Hat)55577202015-09-29 19:06:50 -04005815 if (fgraph_sleep_time)
Steven Rostedtbe6f1642009-03-24 11:06:24 -04005816 return;
5817
Steven Rostedt8aef2d22009-03-24 01:10:15 -04005818 timestamp = trace_clock_local();
5819
5820 prev->ftrace_timestamp = timestamp;
5821
5822 /* only process tasks that we timestamped */
5823 if (!next->ftrace_timestamp)
5824 return;
5825
5826 /*
5827 * Update all the counters in next to make up for the
5828 * time next was sleeping.
5829 */
5830 timestamp -= next->ftrace_timestamp;
5831
5832 for (index = next->curr_ret_stack; index >= 0; index--)
5833 next->ret_stack[index].calltime += timestamp;
5834}
5835
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005836/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005837static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005838{
5839 struct ftrace_ret_stack **ret_stack_list;
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01005840 int ret, cpu;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005841
5842 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5843 sizeof(struct ftrace_ret_stack *),
5844 GFP_KERNEL);
5845
5846 if (!ret_stack_list)
5847 return -ENOMEM;
5848
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01005849 /* The cpu_boot init_task->ret_stack will never be freed */
Steven Rostedt179c4982009-06-02 12:03:19 -04005850 for_each_online_cpu(cpu) {
5851 if (!idle_task(cpu)->ret_stack)
Steven Rostedt868baf02011-02-10 21:26:13 -05005852 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
Steven Rostedt179c4982009-06-02 12:03:19 -04005853 }
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01005854
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005855 do {
5856 ret = alloc_retstack_tasklist(ret_stack_list);
5857 } while (ret == -EAGAIN);
5858
Steven Rostedt8aef2d22009-03-24 01:10:15 -04005859 if (!ret) {
Steven Rostedt38516ab2010-04-20 17:04:50 -04005860 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Steven Rostedt8aef2d22009-03-24 01:10:15 -04005861 if (ret)
5862 pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
5864 }
5865
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005866 kfree(ret_stack_list);
5867 return ret;
5868}
5869
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08005870/*
5871 * Hibernation protection.
5872 * The state of the current task is too unstable during
5873 * suspend/restore to disk. We want to protect against that.
5874 */
5875static int
5876ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5877 void *unused)
5878{
5879 switch (state) {
5880 case PM_HIBERNATION_PREPARE:
5881 pause_graph_tracing();
5882 break;
5883
5884 case PM_POST_HIBERNATION:
5885 unpause_graph_tracing();
5886 break;
5887 }
5888 return NOTIFY_DONE;
5889}
5890
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005891static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5892{
5893 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5894 return 0;
5895 return __ftrace_graph_entry(trace);
5896}
5897
5898/*
5899 * The function graph tracer should only trace the functions defined
5900 * by set_ftrace_filter and set_ftrace_notrace. If another function
5901 * tracer ops is registered, the graph tracer requires testing each
5902 * function against the global ops, rather than tracing any function
5903 * that any registered ftrace_ops happens to hook.
5904 */
5905static void update_function_graph_func(void)
5906{
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005907 struct ftrace_ops *op;
5908 bool do_test = false;
5909
5910 /*
5911 * The graph and global ops share the same set of functions
5912 * to test. If any other ops is on the list, then
5913 * the graph tracing needs to test if it is a function
5914 * it should call.
5915 */
5916 do_for_each_ftrace_op(op, ftrace_ops_list) {
5917 if (op != &global_ops && op != &graph_ops &&
5918 op != &ftrace_list_end) {
5919 do_test = true;
5920 /* in double loop, break out with goto */
5921 goto out;
5922 }
5923 } while_for_each_ftrace_op(op);
5924 out:
5925 if (do_test)
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005926 ftrace_graph_entry = ftrace_graph_entry_test;
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005927 else
5928 ftrace_graph_entry = __ftrace_graph_entry;
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005929}
5930
Mathias Krause8275f692014-03-30 15:31:50 +02005931static struct notifier_block ftrace_suspend_notifier = {
5932 .notifier_call = ftrace_suspend_notifier_call,
5933};
5934
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01005935int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5936 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01005937{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005938 int ret = 0;
5939
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005940 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005941
Steven Rostedt05ce5812009-03-24 00:18:31 -04005942 /* we currently allow only one tracer registered at a time */
Steven Rostedt597af812009-04-03 15:24:12 -04005943 if (ftrace_graph_active) {
Steven Rostedt05ce5812009-03-24 00:18:31 -04005944 ret = -EBUSY;
5945 goto out;
5946 }
5947
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08005948 register_pm_notifier(&ftrace_suspend_notifier);
5949
Steven Rostedt597af812009-04-03 15:24:12 -04005950 ftrace_graph_active++;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005951 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005952 if (ret) {
Steven Rostedt597af812009-04-03 15:24:12 -04005953 ftrace_graph_active--;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005954 goto out;
5955 }
Steven Rostedte53a6312008-11-26 00:16:25 -05005956
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01005957 ftrace_graph_return = retfunc;
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005958
5959 /*
5960 * Point the indirect function at the entryfunc, and the
5961 * function that actually gets called at the entry_test first. Then
5962 * call update_function_graph_func() to determine if
5963 * the entryfunc should be called directly or not.
5964 */
5965 __ftrace_graph_entry = entryfunc;
5966 ftrace_graph_entry = ftrace_graph_entry_test;
5967 update_function_graph_func();
Steven Rostedte53a6312008-11-26 00:16:25 -05005968
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005969 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005970out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005971 mutex_unlock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005972 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01005973}
5974
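/*
 * Usage sketch (editor's addition): hooking function entry and return.
 * Signatures match the trace_func_graph_ent_t/trace_func_graph_ret_t
 * typedefs used above.
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* return 0 to skip tracing this function's return */
	return 1;
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the function's duration */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif
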
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01005975void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01005976{
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005977 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005978
Steven Rostedt597af812009-04-03 15:24:12 -04005979 if (unlikely(!ftrace_graph_active))
Steven Rostedt2aad1b72009-03-30 11:11:28 -04005980 goto out;
5981
Steven Rostedt597af812009-04-03 15:24:12 -04005982 ftrace_graph_active--;
Sami Tolvanenc2f9bce2018-05-10 14:56:41 -07005983 ftrace_graph_return = ftrace_graph_return_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05005984 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedt (Red Hat)23a8e842014-01-13 10:30:23 -05005985 __ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedt (Red Hat)5f151b22014-08-15 17:18:46 -04005986 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08005987 unregister_pm_notifier(&ftrace_suspend_notifier);
Steven Rostedt38516ab2010-04-20 17:04:50 -04005988 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01005989
Steven Rostedt2aad1b72009-03-30 11:11:28 -04005990 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05005991 mutex_unlock(&ftrace_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01005992}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01005993
Steven Rostedt868baf02011-02-10 21:26:13 -05005994static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5995
5996static void
5997graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5998{
5999 atomic_set(&t->tracing_graph_pause, 0);
6000 atomic_set(&t->trace_overrun, 0);
6001 t->ftrace_timestamp = 0;
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006002 /* make curr_ret_stack visible before we add the ret_stack */
Steven Rostedt868baf02011-02-10 21:26:13 -05006003 smp_wmb();
6004 t->ret_stack = ret_stack;
6005}
6006
6007/*
6008 * Allocate a return stack for the idle task. May be the first
6009 * time through, or it may be done by CPU hotplug online.
6010 */
6011void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
6012{
6013 t->curr_ret_stack = -1;
6014 /*
6015 * The idle task has no parent; it either has its own
6016 * stack or no stack at all.
6017 */
6018 if (t->ret_stack)
6019 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
6020
6021 if (ftrace_graph_active) {
6022 struct ftrace_ret_stack *ret_stack;
6023
6024 ret_stack = per_cpu(idle_ret_stack, cpu);
6025 if (!ret_stack) {
6026 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
6027 * sizeof(struct ftrace_ret_stack),
6028 GFP_KERNEL);
6029 if (!ret_stack)
6030 return;
6031 per_cpu(idle_ret_stack, cpu) = ret_stack;
6032 }
6033 graph_init_task(t, ret_stack);
6034 }
6035}
6036
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006037/* Allocate a return stack for newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01006038void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006039{
Steven Rostedt84047e32009-06-02 16:51:55 -04006040 /* Make sure we do not use the parent ret_stack */
6041 t->ret_stack = NULL;
Steven Rostedtea14eb72010-03-12 19:41:23 -05006042 t->curr_ret_stack = -1;
Steven Rostedt84047e32009-06-02 16:51:55 -04006043
Steven Rostedt597af812009-04-03 15:24:12 -04006044 if (ftrace_graph_active) {
Steven Rostedt82310a32009-06-02 12:26:07 -04006045 struct ftrace_ret_stack *ret_stack;
6046
6047 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006048 * sizeof(struct ftrace_ret_stack),
6049 GFP_KERNEL);
Steven Rostedt82310a32009-06-02 12:26:07 -04006050 if (!ret_stack)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006051 return;
Steven Rostedt868baf02011-02-10 21:26:13 -05006052 graph_init_task(t, ret_stack);
Steven Rostedt84047e32009-06-02 16:51:55 -04006053 }
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006054}
6055
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01006056void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006057{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01006058 struct ftrace_ret_stack *ret_stack = t->ret_stack;
6059
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006060 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01006061 /* NULL must become visible to IRQs before we free it: */
6062 barrier();
6063
6064 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01006065}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01006066#endif