blob: ffa1a0b6509e8e8d87ca498c212ba75159924bd8 [file] [log] [blame]
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
10
Fabian Frederick3448bac2014-06-07 13:43:08 +020011#define pr_fmt(fmt) fmt
12
Steven Rostedte6187002009-04-15 13:36:40 -040013#include <linux/workqueue.h>
14#include <linux/spinlock.h>
15#include <linux/kthread.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050016#include <linux/tracefs.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050017#include <linux/uaccess.h>
18#include <linux/module.h>
19#include <linux/ctype.h>
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -040020#include <linux/sort.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Steven Rostedte6187002009-04-15 13:36:40 -040022#include <linux/delay.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050023
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -040024#include <trace/events/sched.h>
25
Li Zefan020e5f82009-07-01 10:47:05 +080026#include <asm/setup.h>
27
Steven Rostedt91729ef92009-03-02 15:03:01 -050028#include "trace_output.h"
Steven Rostedtb77e38a2009-02-24 10:21:36 -050029
Steven Rostedt4e5292e2009-09-12 19:26:21 -040030#undef TRACE_SYSTEM
Steven Rostedtb628b3e2009-02-27 23:32:58 -050031#define TRACE_SYSTEM "TRACE_SYSTEM"
32
/* Serializes manipulation of the event lists and event state below. */
DEFINE_MUTEX(event_mutex);

/* List of every registered trace event call. */
LIST_HEAD(ftrace_events);
/* Pseudo fields (CPU/COMM) usable in filters but absent from records. */
static LIST_HEAD(ftrace_generic_fields);
/* Fields of the trace_entry header shared by all events. */
static LIST_HEAD(ftrace_common_fields);

/* Field/file structures are zeroed on allocation. */
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

/* Slab caches for the frequently allocated field and file structures. */
static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;
43
/*
 * Accessors for the subsystem reference count.  Callers are expected
 * to hold event_mutex (the count is not atomic).
 */
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

/* Returns the count *before* the increment. */
static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

/* Returns the count *after* the decrement; 0 means last reference. */
static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}
58
/*
 * Iterate over every event file of every trace instance.  These open a
 * nested loop over ftrace_trace_arrays and each array's events list and
 * must be closed with while_for_each_event_file().
 *
 * Double loops, do not use break, only goto's work.
 */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

/* Safe variant: @file may be removed from the list while iterating. */
#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

/* Closes the outer loop opened by the do_for_each_event_file*() macros. */
#define while_for_each_event_file()		\
	}
71
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080072static struct list_head *
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -040073trace_get_fields(struct trace_event_call *event_call)
Steven Rostedt2e33af02010-04-22 10:35:55 -040074{
75 if (!event_call->class->get_fields)
76 return &event_call->class->fields;
77 return event_call->class->get_fields(event_call);
78}
79
/*
 * Linearly scan the field list @head for a field named @name.
 * Returns the matching field, or NULL when no field has that name.
 */
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}
92
93struct ftrace_event_field *
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -040094trace_find_event_field(struct trace_event_call *call, char *name)
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080095{
96 struct ftrace_event_field *field;
97 struct list_head *head;
98
Steven Rostedt (Red Hat)e57cbaf2016-03-03 17:18:20 -050099 head = trace_get_fields(call);
100 field = __find_event_field(head, name);
101 if (field)
102 return field;
103
Daniel Wagner9f616682015-08-10 14:35:46 +0200104 field = __find_event_field(&ftrace_generic_fields, name);
105 if (field)
106 return field;
107
Steven Rostedt (Red Hat)e57cbaf2016-03-03 17:18:20 -0500108 return __find_event_field(&ftrace_common_fields, name);
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +0800109}
110
Li Zefan8728fe52010-05-24 16:22:49 +0800111static int __trace_define_field(struct list_head *head, const char *type,
112 const char *name, int offset, int size,
113 int is_signed, int filter_type)
Tom Zanussicf027f62009-03-22 03:30:39 -0500114{
115 struct ftrace_event_field *field;
116
Steven Rostedtd1a29142013-02-27 20:23:57 -0500117 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
Tom Zanussicf027f62009-03-22 03:30:39 -0500118 if (!field)
Namhyung Kimaaf6ac02013-06-07 15:07:48 +0900119 return -ENOMEM;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100120
Steven Rostedt92edca02013-02-27 20:41:37 -0500121 field->name = name;
122 field->type = type;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100123
Li Zefan43b51ea2009-08-07 10:33:22 +0800124 if (filter_type == FILTER_OTHER)
125 field->filter_type = filter_assign_type(type);
126 else
127 field->filter_type = filter_type;
128
Tom Zanussicf027f62009-03-22 03:30:39 -0500129 field->offset = offset;
130 field->size = size;
Tom Zanussia118e4d2009-04-28 03:04:53 -0500131 field->is_signed = is_signed;
Li Zefanaa38e9f2009-08-07 10:33:02 +0800132
Steven Rostedt2e33af02010-04-22 10:35:55 -0400133 list_add(&field->link, head);
Tom Zanussicf027f62009-03-22 03:30:39 -0500134
135 return 0;
Tom Zanussicf027f62009-03-22 03:30:39 -0500136}
Li Zefan8728fe52010-05-24 16:22:49 +0800137
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400138int trace_define_field(struct trace_event_call *call, const char *type,
Li Zefan8728fe52010-05-24 16:22:49 +0800139 const char *name, int offset, int size, int is_signed,
140 int filter_type)
141{
142 struct list_head *head;
143
144 if (WARN_ON(!call->class))
145 return 0;
146
147 head = trace_get_fields(call);
148 return __trace_define_field(head, type, name, offset, size,
149 is_signed, filter_type);
150}
Steven Rostedt17c873e2009-04-10 18:12:50 -0400151EXPORT_SYMBOL_GPL(trace_define_field);
Tom Zanussicf027f62009-03-22 03:30:39 -0500152
/*
 * Define one generic pseudo field: not present in the trace record, so
 * offset and size are 0 and only the name and filter type matter.
 * Must be used inside a function that declares "int ret" and may
 * return early on failure.
 */
#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

/*
 * Define one field of the common trace_entry header shared by every
 * event; offset/size are taken from a "struct trace_entry ent" that
 * must be in scope.  Same "int ret" requirement as above.
 */
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
168
/* Register the pseudo fields (CPU/COMM) usable in event filters. */
static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	/* ret is 0 here: each macro above returns early on failure. */
	return ret;
}
180
/* Register the fields of the trace_entry header common to all events. */
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;	/* only used for offsetof/sizeof */

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	/* ret is 0 here: each macro above returns early on failure. */
	return ret;
}
193
/*
 * Free every field on @call's own field list.  The generic and common
 * field lists are global and are not touched here.
 */
static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}
205
/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
223
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400224int trace_event_raw_init(struct trace_event_call *call)
Li Zefan87d9b4e2009-12-08 11:14:20 +0800225{
226 int id;
227
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -0400228 id = register_trace_event(&call->event);
Li Zefan87d9b4e2009-12-08 11:14:20 +0800229 if (!id)
230 return -ENODEV;
Li Zefan87d9b4e2009-12-08 11:14:20 +0800231
232 return 0;
233}
234EXPORT_SYMBOL_GPL(trace_event_raw_init);
235
/*
 * Return true if this CPU has marked the current task as filtered out
 * by the instance's pid filter (the per-cpu ignore_pid flag is kept up
 * to date by the sched probes below).  Uses rcu_dereference_sched()
 * and this_cpu_ptr(), so the caller must not be preemptible.
 */
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *pid_list;

	/* No pid list means no filtering at all. */
	pid_list = rcu_dereference_sched(tr->filtered_pids);
	if (!pid_list)
		return false;

	data = this_cpu_ptr(tr->trace_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
251
/*
 * Reserve ring buffer space of @len bytes for an event of @trace_file,
 * filling @fbuffer with everything needed to later commit it.  Returns
 * a pointer to the entry data, or NULL when the event is pid-filtered
 * or the reservation fails.
 */
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	/* Drop the event entirely if the current task is pid-filtered. */
	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/* Capture irq flags and preempt count at the tracepoint. */
	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400285
/* Serializes use of the shared tracepoint_print_iter below. */
static DEFINE_SPINLOCK(tracepoint_iter_lock);

/*
 * Mirror the event held in @fbuffer to printk, using the event's
 * trace() output callback and the global tracepoint_print_iter.
 * Silently does nothing if the iterator or the event's output
 * callbacks are not available.
 */
static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	if (!iter)
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	event = &fbuffer->trace_file->event_call->event;

	/* The iterator is shared; irq-safe lock while we format into it. */
	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
314
/*
 * Commit an event previously set up by trace_event_buffer_reserve().
 * Optionally mirrors it to printk (tracepoint_printk), then runs the
 * trigger/commit path.  @len is passed through to
 * event_trigger_unlock_commit().
 */
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
			       unsigned long len)
{
	if (tracepoint_printk)
		output_printk(fbuffer);

	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc, len);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400326
/*
 * Default reg() callback for tracepoint-based events: (un)register the
 * trace or perf probe of @call depending on @type.  @data is the
 * trace_event_file for the trace cases and is passed as probe data;
 * the perf cases pass @call itself.  Returns 0 or the registration
 * error.
 */
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	/* This callback is only valid for tracepoint-backed events. */
	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	/* perf open/close/add/del need no tracepoint work here. */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400364
/*
 * Start or stop pid->comm recording for every currently enabled event
 * file in every trace instance, updating each file's RECORDED_CMD bit
 * to match.
 */
void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
386
/*
 * Core enable/disable state machine for one event file.
 *
 * @enable:       1 to enable the event, 0 to disable it.
 * @soft_disable: non-zero when the request comes from a "soft" user
 *                (e.g. triggers) that needs the tracepoint registered
 *                but the event to appear disabled; tracked by the
 *                sm_ref counter so soft users can nest.
 *
 * Returns 0 on success or the error from the class reg() callback.
 * Callers are expected to hold event_mutex.
 */
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	/* Snapshot flags to detect a SOFT_DISABLED transition at the end. */
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}
486
/* Public wrapper exposing the soft-disable capable enable/disable path. */
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

/* Internal wrapper for plain (non-soft) enable/disable requests. */
static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}
498
/* Disable every event file belonging to trace instance @tr. */
static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}
509
/*
 * sched_process_exit probe: remove the exiting @task from the
 * instance's filtered pid list (NULL "self" means unconditional
 * removal in trace_filter_add_remove_task()).
 */
static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

/*
 * sched_process_fork probe: update the filtered pid list for the new
 * child @task based on its parent @self, so children of traced tasks
 * get traced too (see trace_filter_add_remove_task()).
 */
static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}
531
/*
 * (Un)register the fork/exit probes that keep @tr's filtered pid list
 * in sync with task creation and death.  The fork probe runs at the
 * lowest priority and the exit probe at the highest, so other probes
 * see a consistent pid list.
 */
void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
						    tr);
	}
}
546
/*
 * sched_switch probe (runs before the event probes): the switch event
 * itself involves both tasks, so record it unless *both* @prev and
 * @next are filtered out.
 */
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, prev) &&
		       trace_ignore_this_task(pid_list, next));
}

/*
 * sched_switch probe (runs after the event probes): from here on the
 * CPU runs @next, so ignore_pid reflects @next alone.
 */
static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, next));
}

/*
 * sched_wakeup probe (pre): if this CPU is currently ignoring events,
 * re-check against the woken @task so its wakeup can be recorded.
 */
static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, task));
}

/*
 * sched_wakeup probe (post): restore ignore_pid to reflect the task
 * still running on this CPU (current), not the woken one.
 */
static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
606
/*
 * Tear down pid filtering for @tr: unregister all sched probes, clear
 * every file's PID_FILTER bit and every CPU's ignore_pid flag, drop
 * the pid list pointer, and free the list only after an RCU-sched
 * grace period so concurrent readers are safe.  Requires event_mutex.
 */
static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}
644
/* Locked wrapper around __ftrace_clear_event_pids(). */
static void ftrace_clear_event_pids(struct trace_array *tr)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr);
	mutex_unlock(&event_mutex);
}
651
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400652static void __put_system(struct event_subsystem *system)
653{
654 struct event_filter *filter = system->filter;
655
Steven Rostedt6e94a782013-06-27 10:58:31 -0400656 WARN_ON_ONCE(system_refcount(system) == 0);
657 if (system_refcount_dec(system))
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400658 return;
659
Steven Rostedtae63b312012-05-03 23:09:03 -0400660 list_del(&system->list);
661
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400662 if (filter) {
663 kfree(filter->filter_string);
664 kfree(filter);
665 }
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +0200666 kfree_const(system->name);
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400667 kfree(system);
668}
669
670static void __get_system(struct event_subsystem *system)
671{
Steven Rostedt6e94a782013-06-27 10:58:31 -0400672 WARN_ON_ONCE(system_refcount(system) == 0);
673 system_refcount_inc(system);
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400674}
675
/* Take a reference on @dir and on the subsystem it points to. */
static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}
682
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400683static void __put_system_dir(struct trace_subsystem_dir *dir)
Steven Rostedtae63b312012-05-03 23:09:03 -0400684{
685 WARN_ON_ONCE(dir->ref_count == 0);
686 /* If the subsystem is about to be freed, the dir must be too */
Steven Rostedt6e94a782013-06-27 10:58:31 -0400687 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
Steven Rostedtae63b312012-05-03 23:09:03 -0400688
689 __put_system(dir->subsystem);
690 if (!--dir->ref_count)
691 kfree(dir);
692}
693
/* Like __put_system_dir() but takes event_mutex for the caller. */
static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}
700
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400701static void remove_subsystem(struct trace_subsystem_dir *dir)
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200702{
703 if (!dir)
704 return;
705
706 if (!--dir->nr_events) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -0500707 tracefs_remove_recursive(dir->entry);
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200708 list_del(&dir->list);
709 __put_system_dir(dir);
710 }
711}
712
/*
 * Remove @file: clear the i_private of every child of its tracefs
 * directory (open file handles may still reference those inodes),
 * remove the directory, unlink the file from its trace_array list,
 * and free it along with its filter.
 */
static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}
734
Li Zefan8f31bfe2009-05-08 10:31:42 +0800735/*
736 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
737 */
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400738static int
739__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
740 const char *sub, const char *event, int set)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500741{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400742 struct trace_event_file *file;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400743 struct trace_event_call *call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400744 const char *name;
Steven Rostedt29f93942009-05-08 16:06:47 -0400745 int ret = -EINVAL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500746
Steven Rostedtae63b312012-05-03 23:09:03 -0400747 list_for_each_entry(file, &tr->events, list) {
748
749 call = file->event_call;
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400750 name = trace_event_name(call);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500751
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400752 if (!name || !call->class || !call->class->reg)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500753 continue;
Steven Rostedt1473e442009-02-24 14:15:08 -0500754
Steven Rostedt9b637762012-05-10 15:55:43 -0400755 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
756 continue;
757
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500758 if (match &&
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400759 strcmp(match, name) != 0 &&
Steven Rostedt8f082012010-04-20 10:47:33 -0400760 strcmp(match, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500761 continue;
762
Steven Rostedt8f082012010-04-20 10:47:33 -0400763 if (sub && strcmp(sub, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500764 continue;
765
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400766 if (event && strcmp(event, name) != 0)
Steven Rostedt1473e442009-02-24 14:15:08 -0500767 continue;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500768
Steven Rostedtae63b312012-05-03 23:09:03 -0400769 ftrace_event_enable_disable(file, set);
Steven Rostedtfd994982009-02-28 02:41:25 -0500770
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500771 ret = 0;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500772 }
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400773
774 return ret;
775}
776
/* Lock-taking wrapper around __ftrace_set_clr_event_nolock(). */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
788
/*
 * Parse a "<subsystem>:<event>" style string in @buf and enable or
 * disable the matching events in @tr. @buf is temporarily split by
 * strsep() and then restored, so the same buffer can be reused.
 */
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		/* "*" or an empty string acts as a wildcard */
		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}
826
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	/* The top level trace array may not exist yet */
	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
Steven Rostedt4671c792009-05-08 16:27:41 -0400849
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500850/* 128 should be much more than enough */
851#define EVENT_BUF_SIZE 127
852
/*
 * Write handler for the "set_event" file. Parses one event name from
 * user space (a leading '!' means disable) and applies it to the
 * trace_array behind the seq_file.
 */
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		/* A leading '!' means disable the event */
		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		/* Skip past the '!' (if any) when passing the name on */
		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
894
895static void *
896t_next(struct seq_file *m, void *v, loff_t *pos)
897{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400898 struct trace_event_file *file = v;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400899 struct trace_event_call *call;
Steven Rostedtae63b312012-05-03 23:09:03 -0400900 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500901
902 (*pos)++;
903
Steven Rostedtae63b312012-05-03 23:09:03 -0400904 list_for_each_entry_continue(file, &tr->events, list) {
905 call = file->event_call;
Steven Rostedt40e26812009-03-10 11:32:40 -0400906 /*
907 * The ftrace subsystem is for showing formats only.
908 * They can not be enabled or disabled via the event files.
909 */
Steven Rostedt (Red Hat)d0454372016-02-24 09:04:24 -0500910 if (call->class && call->class->reg &&
911 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
Steven Rostedtae63b312012-05-03 23:09:03 -0400912 return file;
Steven Rostedt40e26812009-03-10 11:32:40 -0400913 }
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500914
Li Zefan30bd39c2009-09-18 14:07:05 +0800915 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500916}
917
/*
 * seq_file ->start for "available_events". Takes event_mutex
 * (released in t_stop()) and replays t_next() up to position *pos.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	/* Start at the list head itself; t_next() steps past it */
	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}
934
935static void *
936s_next(struct seq_file *m, void *v, loff_t *pos)
937{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400938 struct trace_event_file *file = v;
Steven Rostedtae63b312012-05-03 23:09:03 -0400939 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500940
941 (*pos)++;
942
Steven Rostedtae63b312012-05-03 23:09:03 -0400943 list_for_each_entry_continue(file, &tr->events, list) {
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400944 if (file->flags & EVENT_FILE_FL_ENABLED)
Steven Rostedtae63b312012-05-03 23:09:03 -0400945 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500946 }
947
Li Zefan30bd39c2009-09-18 14:07:05 +0800948 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500949}
950
/*
 * seq_file ->start for "set_event". Takes event_mutex (released in
 * t_stop()) and replays s_next() up to position *pos.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	/* Start at the list head itself; s_next() steps past it */
	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}
967
968static int t_show(struct seq_file *m, void *v)
969{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400970 struct trace_event_file *file = v;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400971 struct trace_event_call *call = file->event_call;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500972
Steven Rostedt8f082012010-04-20 10:47:33 -0400973 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
974 seq_printf(m, "%s:", call->class->system);
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400975 seq_printf(m, "%s\n", trace_event_name(call));
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500976
977 return 0;
978}
979
/* seq_file ->stop: drop the lock taken in t_start()/s_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
984
/*
 * seq_file ->next for "set_event_pid": step through the filtered
 * PID list (RCU sched lock held by p_start()).
 */
static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

	return trace_pid_next(pid_list, v, pos);
}
993
/*
 * seq_file ->start for "set_event_pid". Takes event_mutex and an RCU
 * sched read lock; both are dropped in p_stop().
 */
static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->filtered_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}
1016
/* seq_file ->stop: release the locks taken in p_start(). */
static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}
1023
Steven Rostedt1473e442009-02-24 14:15:08 -05001024static ssize_t
1025event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1026 loff_t *ppos)
1027{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001028 struct trace_event_file *file;
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +02001029 unsigned long flags;
Tom Zanussia4390592013-06-29 00:08:04 -05001030 char buf[4] = "0";
Steven Rostedt1473e442009-02-24 14:15:08 -05001031
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +02001032 mutex_lock(&event_mutex);
1033 file = event_file_data(filp);
1034 if (likely(file))
1035 flags = file->flags;
1036 mutex_unlock(&event_mutex);
1037
1038 if (!file)
1039 return -ENODEV;
1040
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001041 if (flags & EVENT_FILE_FL_ENABLED &&
1042 !(flags & EVENT_FILE_FL_SOFT_DISABLED))
Tom Zanussia4390592013-06-29 00:08:04 -05001043 strcpy(buf, "1");
1044
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001045 if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
1046 flags & EVENT_FILE_FL_SOFT_MODE)
Tom Zanussia4390592013-06-29 00:08:04 -05001047 strcat(buf, "*");
1048
1049 strcat(buf, "\n");
Steven Rostedt1473e442009-02-24 14:15:08 -05001050
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -04001051 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
Steven Rostedt1473e442009-02-24 14:15:08 -05001052}
1053
/*
 * Write handler for an event's "enable" file: "0" disables, "1"
 * enables. The file data is re-looked-up under event_mutex because
 * the event may be removed while the file is held open.
 */
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
1089
/*
 * Read handler for a subsystem's "enable" file. Prints:
 *   '0' - all events in the subsystem are disabled
 *   '1' - all events in the subsystem are enabled
 *   'X' - a mixture of enabled and disabled events
 */
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture: bit 0 is set when a disabled event is
		 * seen, bit 1 when an enabled one is seen.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
1135
/*
 * Write handler for a subsystem's "enable" file: "0" disables and
 * "1" enables every event in the subsystem (a NULL system means all
 * events of the trace array).
 */
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	/* Note: the position is advanced even on error */
	*ppos += cnt;

	return ret;
}
1175
/*
 * Pseudo-positions used by the "format" file's seq iterator in
 * addition to pointers to real field entries. (The "SEPERATOR"
 * misspelling is kept as-is; it is referenced elsewhere.)
 */
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
1181
/*
 * seq_file ->next for the "format" file. The sequence is: header,
 * the common fields, a separator, the event's own fields, then the
 * print fmt. Field lists are walked via ->prev (fields were added
 * head-first, so reverse order gives declaration order).
 */
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}
1213
1214static int f_show(struct seq_file *m, void *v)
1215{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001216 struct trace_event_call *call = event_file_data(m->private);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001217 struct ftrace_event_field *field;
1218 const char *array_descriptor;
1219
1220 switch ((unsigned long)v) {
1221 case FORMAT_HEADER:
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04001222 seq_printf(m, "name: %s\n", trace_event_name(call));
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001223 seq_printf(m, "ID: %d\n", call->event.type);
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01001224 seq_puts(m, "format:\n");
Li Zefan8728fe52010-05-24 16:22:49 +08001225 return 0;
1226
Li Zefan86397dc2010-08-17 13:53:06 +08001227 case FORMAT_FIELD_SEPERATOR:
1228 seq_putc(m, '\n');
1229 return 0;
1230
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001231 case FORMAT_PRINTFMT:
1232 seq_printf(m, "\nprint fmt: %s\n",
1233 call->print_fmt);
1234 return 0;
Steven Rostedt981d0812009-03-02 13:53:59 -05001235 }
1236
Oleg Nesterov7710b632013-07-18 20:47:10 +02001237 field = list_entry(v, struct ftrace_event_field, link);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001238 /*
1239 * Smartly shows the array type(except dynamic array).
1240 * Normal:
1241 * field:TYPE VAR
1242 * If TYPE := TYPE[LEN], it is shown:
1243 * field:TYPE VAR[LEN]
1244 */
1245 array_descriptor = strchr(field->type, '[');
1246
1247 if (!strncmp(field->type, "__data_loc", 10))
1248 array_descriptor = NULL;
1249
1250 if (!array_descriptor)
1251 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1252 field->type, field->name, field->offset,
1253 field->size, !!field->is_signed);
1254 else
1255 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
1256 (int)(array_descriptor - field->type),
1257 field->type, field->name,
1258 array_descriptor, field->offset,
1259 field->size, !!field->is_signed);
1260
1261 return 0;
1262}
1263
/*
 * seq_file ->start for the "format" file: always start from
 * FORMAT_HEADER and replay f_next() up to position *pos.
 */
static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}
1279
/* seq_file ->stop: drop the lock taken in f_start(). */
static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
1284
/* seq_file operations for the per-event "format" file */
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
1291
1292static int trace_format_open(struct inode *inode, struct file *file)
1293{
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001294 struct seq_file *m;
1295 int ret;
1296
1297 ret = seq_open(file, &trace_format_seq_ops);
1298 if (ret < 0)
1299 return ret;
1300
1301 m = file->private_data;
Oleg Nesterovc5a44a12013-07-26 19:25:43 +02001302 m->private = file;
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001303
1304 return 0;
Steven Rostedt981d0812009-03-02 13:53:59 -05001305}
1306
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001307static ssize_t
1308event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1309{
Oleg Nesterov1a111262013-07-26 19:25:32 +02001310 int id = (long)event_file_data(filp);
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001311 char buf[32];
1312 int len;
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001313
Oleg Nesterov1a111262013-07-26 19:25:32 +02001314 if (unlikely(!id))
1315 return -ENODEV;
1316
1317 len = sprintf(buf, "%d\n", id);
1318
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001319 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001320}
1321
/*
 * Read handler for an event's "filter" file: renders the current
 * filter via print_event_filter() into a temporary trace_seq.
 */
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	/* The whole filter is produced in one go; no partial re-reads */
	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* event_mutex protects against the event going away */
	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}
1354
/*
 * Write handler for an event's "filter" file: copy the filter string
 * from user space and apply it to the event.
 */
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* event_mutex protects against the event going away */
	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
1384
/* List of registered event subsystems */
static LIST_HEAD(event_subsystems);
1386
/*
 * Open handler for a subsystem's files. Verifies the subsystem dir
 * still exists (it may be removed while the dentry is cached) and
 * takes references on both the subsystem dir and its trace array so
 * neither can go away while the file is open.
 */
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Undo both references taken above */
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}
1436
1437static int system_tr_open(struct inode *inode, struct file *filp)
1438{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001439 struct trace_subsystem_dir *dir;
Steven Rostedtae63b312012-05-03 23:09:03 -04001440 struct trace_array *tr = inode->i_private;
1441 int ret;
1442
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001443 if (tracing_is_disabled())
1444 return -ENODEV;
1445
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001446 if (trace_array_get(tr) < 0)
1447 return -ENODEV;
1448
Steven Rostedtae63b312012-05-03 23:09:03 -04001449 /* Make a temporary dir that has no system but points to tr */
1450 dir = kzalloc(sizeof(*dir), GFP_KERNEL);
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001451 if (!dir) {
1452 trace_array_put(tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001453 return -ENOMEM;
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001454 }
Steven Rostedtae63b312012-05-03 23:09:03 -04001455
1456 dir->tr = tr;
1457
1458 ret = tracing_open_generic(inode, filp);
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001459 if (ret < 0) {
1460 trace_array_put(tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001461 kfree(dir);
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001462 return ret;
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001463 }
Steven Rostedtae63b312012-05-03 23:09:03 -04001464
1465 filp->private_data = dir;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001466
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001467 return 0;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001468}
1469
1470static int subsystem_release(struct inode *inode, struct file *file)
1471{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001472 struct trace_subsystem_dir *dir = file->private_data;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001473
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001474 trace_array_put(dir->tr);
1475
Steven Rostedtae63b312012-05-03 23:09:03 -04001476 /*
1477 * If dir->subsystem is NULL, then this is a temporary
1478 * descriptor that was made for a trace_array to enable
1479 * all subsystems.
1480 */
1481 if (dir->subsystem)
1482 put_system(dir);
1483 else
1484 kfree(dir);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001485
1486 return 0;
1487}
1488
Tom Zanussicfb180f2009-03-22 03:31:17 -05001489static ssize_t
1490subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1491 loff_t *ppos)
1492{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001493 struct trace_subsystem_dir *dir = filp->private_data;
Steven Rostedtae63b312012-05-03 23:09:03 -04001494 struct event_subsystem *system = dir->subsystem;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001495 struct trace_seq *s;
1496 int r;
1497
1498 if (*ppos)
1499 return 0;
1500
1501 s = kmalloc(sizeof(*s), GFP_KERNEL);
1502 if (!s)
1503 return -ENOMEM;
1504
1505 trace_seq_init(s);
1506
Tom Zanussi8b372562009-04-28 03:04:59 -05001507 print_subsystem_event_filter(system, s);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001508 r = simple_read_from_buffer(ubuf, cnt, ppos,
1509 s->buffer, trace_seq_used(s));
Tom Zanussicfb180f2009-03-22 03:31:17 -05001510
1511 kfree(s);
1512
1513 return r;
1514}
1515
1516static ssize_t
1517subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1518 loff_t *ppos)
1519{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001520 struct trace_subsystem_dir *dir = filp->private_data;
Tom Zanussi8b372562009-04-28 03:04:59 -05001521 char *buf;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001522 int err;
1523
Tom Zanussi8b372562009-04-28 03:04:59 -05001524 if (cnt >= PAGE_SIZE)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001525 return -EINVAL;
1526
Al Viro70f6cbb2015-12-24 00:13:10 -05001527 buf = memdup_user_nul(ubuf, cnt);
1528 if (IS_ERR(buf))
1529 return PTR_ERR(buf);
Tom Zanussicfb180f2009-03-22 03:31:17 -05001530
Steven Rostedtae63b312012-05-03 23:09:03 -04001531 err = apply_subsystem_event_filter(dir, buf);
Al Viro70f6cbb2015-12-24 00:13:10 -05001532 kfree(buf);
Tom Zanussi8b372562009-04-28 03:04:59 -05001533 if (err < 0)
Li Zefan44e9c8b2009-04-11 15:55:28 +08001534 return err;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001535
1536 *ppos += cnt;
1537
1538 return cnt;
1539}
1540
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001541static ssize_t
1542show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1543{
1544 int (*func)(struct trace_seq *s) = filp->private_data;
1545 struct trace_seq *s;
1546 int r;
1547
1548 if (*ppos)
1549 return 0;
1550
1551 s = kmalloc(sizeof(*s), GFP_KERNEL);
1552 if (!s)
1553 return -ENOMEM;
1554
1555 trace_seq_init(s);
1556
1557 func(s);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001558 r = simple_read_from_buffer(ubuf, cnt, ppos,
1559 s->buffer, trace_seq_used(s));
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001560
1561 kfree(s);
1562
1563 return r;
1564}
1565
/*
 * Per-cpu callback (run via on_each_cpu() by the pid-filter writers):
 * refresh this CPU's ignore_pid flag against the current pid filter so
 * that tasks already running when the filter changed are handled.
 */
static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
1581
/*
 * Write handler for the "set_event_pid" file: parse the pid list from
 * userspace, publish it via RCU, mark every event file as pid-filtered,
 * and (on first use) register the sched tracepoint probes that maintain
 * the per-cpu ignore_pid state.
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	/* event_mutex protects writers; readers use RCU */
	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	/* Build the new pid list from the old list plus the user input */
	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	/* Every event of this trace_array now has to honor the pid filter */
	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		/* Wait for readers of the old list before freeing it */
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/*
		 * Register a probe that is called before all other probes
		 * to set ignore_pid if next or prev do not match.
		 * Register a probe this is called after all other probes
		 * to only keep ignore_pid set if next pid matches.
		 */
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
						     tr, INT_MAX);
		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
						     tr, 0);

		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
1661
Steven Rostedt15075ca2012-05-03 14:57:28 -04001662static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1663static int ftrace_event_set_open(struct inode *inode, struct file *file);
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04001664static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001665static int ftrace_event_release(struct inode *inode, struct file *file);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001666
/* seq_file iterator used via ftrace_event_avail_open() (t_* callbacks). */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* seq_file iterator used via ftrace_event_set_open() (s_* walkers). */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

/* seq_file iterator used via ftrace_event_set_pid_open() (p_* walkers). */
static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};
1687
/* Read-only listing of all events (show_event_seq_ops iterator). */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Read/write view of the enabled-event set; release drops the tr ref. */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

/* Read/write view of the pid filter list; release drops the tr ref. */
static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

/* Per-event "enable" toggle. */
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

/* Per-event "format" description (seq_file, opened via trace_format_open). */
static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Read-only event "id" file (no open method needed). */
static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

/* Per-event "filter" expression. */
static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

/* Per-subsystem "filter"; subsystem_open/release manage the refcounts. */
static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Per-subsystem "enable" toggle. */
static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Top-level "enable" for a whole trace_array (system_tr_open's temp dir). */
static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Header files whose content comes from a generator in i_private. */
static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
1766
Steven Rostedtae63b312012-05-03 23:09:03 -04001767static int
1768ftrace_event_open(struct inode *inode, struct file *file,
1769 const struct seq_operations *seq_ops)
Steven Rostedt1473e442009-02-24 14:15:08 -05001770{
Steven Rostedtae63b312012-05-03 23:09:03 -04001771 struct seq_file *m;
1772 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001773
Steven Rostedtae63b312012-05-03 23:09:03 -04001774 ret = seq_open(file, seq_ops);
1775 if (ret < 0)
1776 return ret;
1777 m = file->private_data;
1778 /* copy tr over to seq ops */
1779 m->private = inode->i_private;
Steven Rostedt1473e442009-02-24 14:15:08 -05001780
Steven Rostedtae63b312012-05-03 23:09:03 -04001781 return ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001782}
1783
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001784static int ftrace_event_release(struct inode *inode, struct file *file)
1785{
1786 struct trace_array *tr = inode->i_private;
1787
1788 trace_array_put(tr);
1789
1790 return seq_release(inode, file);
1791}
1792
Steven Rostedt15075ca2012-05-03 14:57:28 -04001793static int
1794ftrace_event_avail_open(struct inode *inode, struct file *file)
1795{
1796 const struct seq_operations *seq_ops = &show_event_seq_ops;
1797
Steven Rostedtae63b312012-05-03 23:09:03 -04001798 return ftrace_event_open(inode, file, seq_ops);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001799}
1800
1801static int
1802ftrace_event_set_open(struct inode *inode, struct file *file)
1803{
1804 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
Steven Rostedtae63b312012-05-03 23:09:03 -04001805 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001806 int ret;
1807
1808 if (trace_array_get(tr) < 0)
1809 return -ENODEV;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001810
1811 if ((file->f_mode & FMODE_WRITE) &&
1812 (file->f_flags & O_TRUNC))
Steven Rostedtae63b312012-05-03 23:09:03 -04001813 ftrace_clear_events(tr);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001814
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001815 ret = ftrace_event_open(inode, file, seq_ops);
1816 if (ret < 0)
1817 trace_array_put(tr);
1818 return ret;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001819}
1820
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04001821static int
1822ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1823{
1824 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1825 struct trace_array *tr = inode->i_private;
1826 int ret;
1827
1828 if (trace_array_get(tr) < 0)
1829 return -ENODEV;
1830
1831 if ((file->f_mode & FMODE_WRITE) &&
1832 (file->f_flags & O_TRUNC))
1833 ftrace_clear_event_pids(tr);
1834
1835 ret = ftrace_event_open(inode, file, seq_ops);
1836 if (ret < 0)
1837 trace_array_put(tr);
1838 return ret;
1839}
1840
Steven Rostedtae63b312012-05-03 23:09:03 -04001841static struct event_subsystem *
1842create_new_subsystem(const char *name)
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001843{
1844 struct event_subsystem *system;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001845
1846 /* need to create new entry */
1847 system = kmalloc(sizeof(*system), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001848 if (!system)
1849 return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001850
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001851 system->ref_count = 1;
Steven Rostedt6e94a782013-06-27 10:58:31 -04001852
1853 /* Only allocate if dynamic (kprobes and modules) */
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +02001854 system->name = kstrdup_const(name, GFP_KERNEL);
1855 if (!system->name)
1856 goto out_free;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001857
Tom Zanussi30e673b2009-04-28 03:04:47 -05001858 system->filter = NULL;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001859
Tom Zanussi8b372562009-04-28 03:04:59 -05001860 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001861 if (!system->filter)
1862 goto out_free;
1863
1864 list_add(&system->list, &event_subsystems);
1865
1866 return system;
1867
1868 out_free:
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +02001869 kfree_const(system->name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001870 kfree(system);
1871 return NULL;
1872}
1873
/*
 * Find or create the tracefs directory for subsystem @name under
 * @parent, and attach @file to it.
 *
 * A trace_subsystem_dir ties one event_subsystem to one trace_array.
 * If this trace_array already has a dir for @name, it is reused and its
 * event count bumped.  Otherwise a new dir is allocated, reusing an
 * existing event_subsystem of that name when one exists globally.
 *
 * Returns the subsystem directory's dentry, or NULL on failure.
 */
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	/* A missing "filter" file is tolerated; only the filter is dropped */
	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}
1948
/*
 * Create the tracefs directory and control files ("enable", "id",
 * "filter", "trigger", "hist", "format") for one event @file, under
 * its subsystem's directory (created on demand) or directly under
 * @parent for the default TRACE_SYSTEM.
 *
 * Returns 0 on success, -ENOMEM or -1 on failure.
 */
static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	const char *name;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	name = trace_event_name(call);
	file->dir = tracefs_create_dir(name, d_events);
	if (!file->dir) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
		return -1;
	}

	/* Events flagged IGNORE_ENABLE get no user-visible "enable" knob */
	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warn("Could not initialize trace point events/%s\n",
				name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	/*
	 * Only event directories that can be enabled should have
	 * triggers.
	 */
	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);

#ifdef CONFIG_HIST_TRIGGERS
	trace_create_file("hist", 0444, file->dir, file,
			  &event_hist_fops);
#endif
	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}
2021
/*
 * Remove @call's per-instance file (tracefs directory and linkage) from
 * whichever trace_array holds it.  An event belongs to at most one file
 * per trace_array, hence the break once it is found.
 */
static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}
2041
/*
 * Tear down @call: disable it in every trace_array, unregister its
 * output event type, remove its per-instance files, and unlink it from
 * the global ftrace_events list.
 */
static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	/* Only events with an output format have a registered event type */
	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
2065
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002066static int event_init(struct trace_event_call *call)
Ezequiel Garcia87819152012-09-12 11:47:57 -03002067{
2068 int ret = 0;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002069 const char *name;
Ezequiel Garcia87819152012-09-12 11:47:57 -03002070
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04002071 name = trace_event_name(call);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002072 if (WARN_ON(!name))
Ezequiel Garcia87819152012-09-12 11:47:57 -03002073 return -EINVAL;
2074
2075 if (call->class->raw_init) {
2076 ret = call->class->raw_init(call);
2077 if (ret < 0 && ret != -ENOSYS)
Fabian Frederick3448bac2014-06-07 13:43:08 +02002078 pr_warn("Could not initialize trace events/%s\n", name);
Ezequiel Garcia87819152012-09-12 11:47:57 -03002079 }
2080
2081 return ret;
2082}
2083
Li Zefan67ead0a2010-05-24 16:25:13 +08002084static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002085__register_event(struct trace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002086{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002087 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04002088
Ezequiel Garcia87819152012-09-12 11:47:57 -03002089 ret = event_init(call);
2090 if (ret < 0)
2091 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04002092
Steven Rostedtae63b312012-05-03 23:09:03 -04002093 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08002094 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07002095
Steven Rostedtae63b312012-05-03 23:09:03 -04002096 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002097}
2098
/*
 * Replace, in place, the @len-byte enum name at @ptr with the decimal
 * value from @map, shifting the remainder of the string left to close
 * the gap. Only valid when the printed value is no longer than the
 * name (len >= elen); otherwise returns NULL and leaves an undefined
 * partial edit behind (caller WARNs and aborts).
 * Returns a pointer just past the inserted value on success.
 */
static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
	int rlen;
	int elen;

	/* Find the length of the enum value as a string */
	elen = snprintf(ptr, 0, "%ld", map->enum_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	/* elen + 1 so snprintf writes all elen digits (NUL overwritten below) */
	snprintf(ptr, elen + 1, "%ld", map->enum_value);

	/* Get the rest of the string of ptr */
	rlen = strlen(ptr + len);
	memmove(ptr + elen, ptr + len, rlen);
	/* Make sure we end the new string */
	ptr[elen + rlen] = 0;

	return ptr + elen;
}
2120
/*
 * Scan @call->print_fmt and replace every stand-alone occurrence of
 * map->enum_string (outside of double-quoted sections) with its numeric
 * value via enum_replace(). A tiny hand-rolled lexer: it skips escaped
 * characters, quoted strings, numeric literals (including suffixes like
 * ULL), and identifiers that don't match, following '.'/'->' member
 * chains so struct member names are never mistaken for enum names.
 */
static void update_event_printk(struct trace_event_call *call,
				struct trace_enum_map *map)
{
	char *ptr;
	int quote = 0;
	int len = strlen(map->enum_string);

	for (ptr = call->print_fmt; *ptr; ptr++) {
		if (*ptr == '\\') {
			/* skip the escaped character, whatever it is */
			ptr++;
			/* paranoid */
			if (!*ptr)
				break;
			continue;
		}
		if (*ptr == '"') {
			quote ^= 1;
			continue;
		}
		if (quote)
			continue;
		if (isdigit(*ptr)) {
			/* skip numbers */
			do {
				ptr++;
				/* Check for alpha chars like ULL */
			} while (isalnum(*ptr));
			if (!*ptr)
				break;
			/*
			 * A number must have some kind of delimiter after
			 * it, and we can ignore that too.
			 */
			continue;
		}
		if (isalpha(*ptr) || *ptr == '_') {
			/* whole-token match: next char must end the identifier */
			if (strncmp(map->enum_string, ptr, len) == 0 &&
			    !isalnum(ptr[len]) && ptr[len] != '_') {
				ptr = enum_replace(ptr, map, len);
				/* Hmm, enum string smaller than value */
				if (WARN_ON_ONCE(!ptr))
					return;
				/*
				 * No need to decrement here, as enum_replace()
				 * returns the pointer to the character passed
				 * the enum, and two enums can not be placed
				 * back to back without something in between.
				 * We can skip that something in between.
				 */
				continue;
			}
		skip_more:
			do {
				ptr++;
			} while (isalnum(*ptr) || *ptr == '_');
			if (!*ptr)
				break;
			/*
			 * If what comes after this variable is a '.' or
			 * '->' then we can continue to ignore that string.
			 */
			if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
				ptr += *ptr == '.' ? 1 : 2;
				if (!*ptr)
					break;
				goto skip_more;
			}
			/*
			 * Once again, we can skip the delimiter that came
			 * after the string.
			 */
			continue;
		}
	}
}
2196
/*
 * For every registered event, rewrite its print_fmt so that enum names
 * found in @map (an array of @len maps, grouped by system) are replaced
 * with their numeric values. Takes trace_event_sem for write since
 * print_fmt strings are modified in place.
 */
void trace_event_enum_update(struct trace_enum_map **map, int len)
{
	struct trace_event_call *call, *p;
	const char *last_system = NULL;
	bool first = false;
	int last_i;
	int i;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		/* events are usually grouped together with systems */
		if (!last_system || call->class->system != last_system) {
			first = true;
			last_i = 0;
			last_system = call->class->system;
		}

		/*
		 * Since calls are grouped by systems, the likelihood that the
		 * next call in the iteration belongs to the same system as the
		 * previous call is high. As an optimization, we skip searching
		 * for a map[] that matches the call's system if the last call
		 * was from the same system. That's what last_i is for. If the
		 * call has the same system as the previous call, then last_i
		 * will be the index of the first map[] that has a matching
		 * system.
		 */
		for (i = last_i; i < len; i++) {
			if (call->class->system == map[i]->system) {
				/* Save the first system if need be */
				if (first) {
					last_i = i;
					first = false;
				}
				update_event_printk(call, map[i]);
			}
		}
	}
	up_write(&trace_event_sem);
}
2237
/*
 * Allocate a trace_event_file binding @call to trace instance @tr and
 * link it onto tr->events. Returns NULL on allocation failure.
 *
 * NOTE(review): kmem_cache_alloc() does not zero the object; only the
 * fields below are initialized here. Presumably the remaining fields
 * (e.g. flags) are set up by the caller/event_create_dir() — confirm.
 */
static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);	/* soft-mode enable refcount */
	atomic_set(&file->tm_ref, 0);	/* trigger-mode refcount */
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);

	return file;
}
2257
Steven Rostedtae63b312012-05-03 23:09:03 -04002258/* Add an event to a trace directory */
2259static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002260__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04002261{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002262 struct trace_event_file *file;
Steven Rostedtae63b312012-05-03 23:09:03 -04002263
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04002264 file = trace_create_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04002265 if (!file)
2266 return -ENOMEM;
2267
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002268 return event_create_dir(tr->event_dir, file);
Steven Rostedtae63b312012-05-03 23:09:03 -04002269}
2270
/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct trace_event_call *call,
			    struct trace_array *tr)
{
	struct trace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	/* No tracefs dir yet; files are created later by
	 * __trace_early_add_event_dirs() once tracefs is up. */
	return 0;
}
2288
Steven Rostedtae63b312012-05-03 23:09:03 -04002289struct ftrace_module_file_ops;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002290static void __add_event_to_tracers(struct trace_event_call *call);
Steven Rostedtae63b312012-05-03 23:09:03 -04002291
/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
	int ret;
	/* Lock order: trace_types_lock before event_mutex (matches
	 * trace_module_notify() and event_trace_add_tracer()). */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt701970b2009-04-24 23:11:22 -04002307
/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 *
 * Tears down @call: unregisters it, frees its field list and filter.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	free_event_filter(call->filter);
	call->filter = NULL;	/* avoid dangling pointer to freed filter */
}
2319
/*
 * Remove @call unless it is still in use: busy if perf holds a
 * reference or any trace instance still has the event enabled.
 * Returns 0 on removal, -EBUSY otherwise.
 */
static int probe_remove_event_call(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on ftrace_event_enable_disable(enable => 0)
		 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}
2352
/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
	int ret;

	/* Take all three locks required by __trace_remove_event_call(),
	 * in the established order. */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}
2368
/* Iterate @event over the array of pointers in [@start, @end). */
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
2373
2374#ifdef CONFIG_MODULES
2375
/*
 * Register all trace events declared by module @mod and expose them in
 * every trace instance. Called from the MODULE_STATE_COMING notifier.
 */
static void trace_module_add_events(struct module *mod)
{
	struct trace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}
2398
/*
 * Unregister every trace event owned by @mod. If any of them was ever
 * enabled, reset all ring buffers so stale entries can't be decoded
 * with a future module's event ids. Called from MODULE_STATE_GOING.
 */
static void trace_module_remove_events(struct module *mod)
{
	struct trace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}
2425
/*
 * Module notifier callback: add/remove a module's trace events as it
 * comes and goes. Other MODULE_STATE_* values are deliberately ignored.
 */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return 0;
}
Steven Rostedt (Red Hat)315326c2013-03-02 17:37:14 -05002446
/* Notifier for module load/unload; registered in event_trace_init path. */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 1, /* higher than trace.c module notify */
};
Steven Rostedt61f919a2009-04-14 18:22:32 -04002451#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04002452
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	/* Best effort: a failure for one event only logs a warning and
	 * does not stop the rest from being created. */
	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(call));
	}
}
2467
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002468struct trace_event_file *
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002469find_event_file(struct trace_array *tr, const char *system, const char *event)
2470{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002471 struct trace_event_file *file;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002472 struct trace_event_call *call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002473 const char *name;
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002474
2475 list_for_each_entry(file, &tr->events, list) {
2476
2477 call = file->event_call;
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04002478 name = trace_event_name(call);
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002479
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002480 if (!name || !call->class || !call->class->reg)
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002481 continue;
2482
2483 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2484 continue;
2485
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002486 if (strcmp(event, name) == 0 &&
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002487 strcmp(system, call->class->system) == 0)
2488 return file;
2489 }
2490 return NULL;
2491}
2492
Steven Rostedt (Red Hat)2875a082013-12-20 23:23:05 -05002493#ifdef CONFIG_DYNAMIC_FTRACE
2494
/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

/* Per-probe state shared by the enable/disable function probes below. */
struct event_probe_data {
	struct trace_event_file	*file;		/* event being toggled */
	unsigned long			count;		/* remaining toggles; -1 = unlimited */
	int					ref;		/* probe registrations sharing this data */
	bool				enable;		/* true: enable event, false: disable */
};
2505
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002506static void
2507event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2508{
2509 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2510 struct event_probe_data *data = *pdata;
2511
2512 if (!data)
2513 return;
2514
2515 if (data->enable)
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002516 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002517 else
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002518 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002519}
2520
/*
 * Counted variant of event_enable_probe(): only toggles the event while
 * data->count is non-zero, decrementing it on each effective toggle
 * (-1 means unlimited). Toggles that would be no-ops don't consume
 * the count.
 */
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}
2542
2543static int
2544event_enable_print(struct seq_file *m, unsigned long ip,
2545 struct ftrace_probe_ops *ops, void *_data)
2546{
2547 struct event_probe_data *data = _data;
2548
2549 seq_printf(m, "%ps:", (void *)ip);
2550
2551 seq_printf(m, "%s:%s:%s",
2552 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2553 data->file->event_call->class->system,
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04002554 trace_event_name(data->file->event_call));
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002555
2556 if (data->count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002557 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002558 else
2559 seq_printf(m, ":count=%ld\n", data->count);
2560
2561 return 0;
2562}
2563
2564static int
2565event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2566 void **_data)
2567{
2568 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2569 struct event_probe_data *data = *pdata;
2570
2571 data->ref++;
2572 return 0;
2573}
2574
/*
 * Drop a reference on the shared probe data; on the last reference,
 * clear the event's soft mode, release the module pin taken in
 * event_enable_func(), and free the data.
 */
static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}
2594
/*
 * The four ops tables below share the same print/init/free callbacks;
 * they differ only in which func runs (plain vs. counted toggle).
 * Whether the probe enables or disables is carried in the probe data,
 * so the enable and disable tables are structurally identical but must
 * stay distinct: the ops pointer identifies the command on unregister.
 */
static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};
2622
/*
 * set_ftrace_filter command handler for
 *   <func>:enable_event:<system>:<event>[:count]  and
 *   <func>:disable_event:<system>:<event>[:count]
 *
 * Registers (or, with a leading '!', unregisters) a function probe that
 * soft-enables/disables the given event when <func> is hit. Pins the
 * event's module for the life of the probe. Returns 0 on success or a
 * negative errno; cleanup on failure unwinds via the goto chain below.
 */
static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	/* A remaining ":count" param selects the counted probe variant */
	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
2732
/* set_ftrace_filter commands; both dispatch to event_enable_func(),
 * which tells them apart by comparing cmd against ENABLE_EVENT_STR. */
static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};
2742
2743static __init int register_event_cmds(void)
2744{
2745 int ret;
2746
2747 ret = register_ftrace_command(&event_enable_cmd);
2748 if (WARN_ON(ret < 0))
2749 return ret;
2750 ret = register_ftrace_command(&event_disable_cmd);
2751 if (WARN_ON(ret < 0))
2752 unregister_ftrace_command(&event_enable_cmd);
2753 return ret;
2754}
2755#else
2756static inline int register_event_cmds(void) { return 0; }
2757#endif /* CONFIG_DYNAMIC_FTRACE */
2758
/*
 * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the tracefs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;


	/* Best effort: warn and continue on per-event failure */
	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}
2780
/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}
2804
/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	/* _safe variant: remove_event_file_dir() unlinks/frees each file */
	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}
2814
/* Expose @call in every existing trace instance (top level and children). */
static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}
2822
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002823extern struct trace_event_call *__start_ftrace_events[];
2824extern struct trace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04002825
Li Zefan020e5f82009-07-01 10:47:05 +08002826static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2827
/*
 * Handle the "trace_event=" boot parameter: stash the event list for
 * later enabling, grow the ring buffer to full size, and disable the
 * startup selftests (they would clobber the early trace data).
 */
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
2837
/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = tracefs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create tracefs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = tracefs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	/* NOTE(review): unlike set_event above, a failure here is silently
	 * ignored (entry is not checked) — confirm this is intentional. */
	entry = tracefs_create_file("set_event_pid", 0644, parent,
				    tr, &ftrace_set_event_pid_fops);

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
2877
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	/* trace_event_sem guards the per-instance event file lists */
	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
2908
2909/*
2910 * The top trace array already had its file descriptors created.
2911 * Now the files themselves need to be created.
2912 */
2913static __init int
2914early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2915{
2916 int ret;
2917
2918 mutex_lock(&event_mutex);
2919
2920 ret = create_event_toplevel_files(parent, tr);
2921 if (ret)
2922 goto out_unlock;
2923
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002924 down_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002925 __trace_early_add_event_dirs(tr);
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002926 up_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002927
2928 out_unlock:
2929 mutex_unlock(&event_mutex);
2930
2931 return ret;
Steven Rostedtae63b312012-05-03 23:09:03 -04002932}
2933
/*
 * event_trace_del_tracer - remove all event state of a trace_array
 * @tr: the trace array (instance) being torn down
 *
 * The teardown order matters: triggers and the pid filter are cleared
 * and every event is disabled before the event directories are removed.
 *
 * Always returns 0.
 */
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/*
	 * Access to events are within rcu_read_lock_sched(); wait for
	 * all current readers to finish before removing the files below.
	 */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}
2961
Steven Rostedtd1a29142013-02-27 20:23:57 -05002962static __init int event_trace_memsetup(void)
2963{
2964 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002965 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
Steven Rostedtd1a29142013-02-27 20:23:57 -05002966 return 0;
2967}
2968
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002969static __init void
2970early_enable_events(struct trace_array *tr, bool disable_first)
2971{
2972 char *buf = bootup_event_buf;
2973 char *token;
2974 int ret;
2975
2976 while (true) {
2977 token = strsep(&buf, ",");
2978
2979 if (!token)
2980 break;
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002981
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05002982 if (*token) {
2983 /* Restarting syscalls requires that we stop them first */
2984 if (disable_first)
2985 ftrace_set_clr_event(tr, token, 0);
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002986
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05002987 ret = ftrace_set_clr_event(tr, token, 1);
2988 if (ret)
2989 pr_warn("Failed to enable trace event: %s\n", token);
2990 }
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002991
2992 /* Put back the comma to allow this to be called again */
2993 if (buf)
2994 *(buf - 1) = ',';
2995 }
2996}
2997
/*
 * Register every compiled-in trace event with the top level trace
 * array, create their early file entries, and enable any events
 * requested on the kernel command line.
 */
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	/*
	 * Walk the table of built-in events laid out by the linker
	 * between __start_ftrace_events and __stop_ftrace_events.
	 * Only successfully initialized events are added to the list.
	 */
	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	/* First pass: nothing is running yet, so no need to disable first */
	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}
3033
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05003034/*
3035 * event_trace_enable() is called from trace_event_init() first to
3036 * initialize events and perhaps start any events that are on the
3037 * command line. Unfortunately, there are some events that will not
3038 * start this early, like the system call tracepoints that need
3039 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3040 * is called before pid 1 starts, and this flag is never set, making
3041 * the syscall tracepoint never get reached, but the event is enabled
3042 * regardless (and not doing anything).
3043 */
3044static __init int event_trace_enable_again(void)
3045{
3046 struct trace_array *tr;
3047
3048 tr = top_trace_array();
3049 if (!tr)
3050 return -ENODEV;
3051
3052 early_enable_events(tr, true);
3053
3054 return 0;
3055}
3056
3057early_initcall(event_trace_enable_again);
3058
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003059static __init int event_trace_init(void)
3060{
Steven Rostedtae63b312012-05-03 23:09:03 -04003061 struct trace_array *tr;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003062 struct dentry *d_tracer;
3063 struct dentry *entry;
Steven Rostedt6d723732009-04-10 14:53:50 -04003064 int ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003065
Steven Rostedtae63b312012-05-03 23:09:03 -04003066 tr = top_trace_array();
Yoshihiro YUNOMAEdc81e5e2014-06-06 07:35:17 +09003067 if (!tr)
3068 return -ENODEV;
Steven Rostedtae63b312012-05-03 23:09:03 -04003069
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003070 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05003071 if (IS_ERR(d_tracer))
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003072 return 0;
3073
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05003074 entry = tracefs_create_file("available_events", 0444, d_tracer,
Steven Rostedtae63b312012-05-03 23:09:03 -04003075 tr, &ftrace_avail_fops);
Steven Rostedt2314c4a2009-03-10 12:04:02 -04003076 if (!entry)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05003077 pr_warn("Could not create tracefs 'available_events' entry\n");
Steven Rostedt2314c4a2009-03-10 12:04:02 -04003078
Daniel Wagner9f616682015-08-10 14:35:46 +02003079 if (trace_define_generic_fields())
3080 pr_warn("tracing: Failed to allocated generic fields");
3081
Li Zefan8728fe52010-05-24 16:22:49 +08003082 if (trace_define_common_fields())
Fabian Frederick3448bac2014-06-07 13:43:08 +02003083 pr_warn("tracing: Failed to allocate common fields");
Li Zefan8728fe52010-05-24 16:22:49 +08003084
Steven Rostedt77248222013-02-27 16:28:06 -05003085 ret = early_event_add_tracer(d_tracer, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04003086 if (ret)
3087 return ret;
Li Zefan020e5f82009-07-01 10:47:05 +08003088
Oleg Nesterov836d4812013-07-31 19:31:37 +02003089#ifdef CONFIG_MODULES
Steven Rostedt6d723732009-04-10 14:53:50 -04003090 ret = register_module_notifier(&trace_module_nb);
Ming Lei55379372009-05-18 23:04:46 +08003091 if (ret)
Fabian Frederick3448bac2014-06-07 13:43:08 +02003092 pr_warn("Failed to register trace events module notifier\n");
Oleg Nesterov836d4812013-07-31 19:31:37 +02003093#endif
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003094 return 0;
3095}
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05003096
/*
 * Early event setup, called before initcalls run. Order matters:
 * the slab caches must exist before syscall metadata is initialized
 * and before the built-in events are registered and enabled.
 */
void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);
Steven Rostedte6187002009-04-15 13:36:40 -04003105
#ifdef CONFIG_FTRACE_STARTUP_TEST

/* Locks taken by test_work() purely to exercise locking paths in the tests */
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
3111
/*
 * Exercise several kernel primitives (plain spinlock, irq-disabling
 * spinlock, mutex, delay and sleep) so that any trace events attached
 * to these paths get a chance to fire during the self test.
 */
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
3124
/*
 * Thread body for the event self test: does an allocation, runs
 * test_work() on every CPU, then sleeps until kthread_stop() is
 * called by event_test_stuff().
 */
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	/* Allocation failure is harmless here; only note it */
	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	/*
	 * Standard kthread stop protocol: mark INTERRUPTIBLE before
	 * checking kthread_should_stop() to avoid missing a wakeup.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
3146
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	/* Give the thread a moment to do its work before stopping it */
	msleep(1);
	kthread_stop(test_thread);
}
3158
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	/* Phase 1: enable, exercise and disable each event individually */
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	/* Phase 2: enable/disable whole subsystems at a time */
	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	/* Phase 3: everything on at once (NULL match/system/event = all) */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
3272
#ifdef CONFIG_FUNCTION_TRACER

/* Per-CPU recursion guard for function_test_events_call() below */
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

/* Event file the function-trace self test writes its entries through */
static struct trace_event_file event_trace_file __initdata;
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04003278
/*
 * ftrace callback used during the self test: records a TRACE_FN entry
 * for every traced function through event_trace_file. Called with an
 * arbitrary context, so it must guard against recursion itself.
 */
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	/* Capture preempt count before we disable preemption ourselves */
	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	/* Any value but 1 means we recursed on this CPU; bail out */
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, flags, pc);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
3316
/*
 * ftrace ops for the self test. Marked RECURSION_SAFE since the
 * callback carries its own per-CPU recursion protection.
 */
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
3322
/*
 * Re-run the event self tests with the function tracer registered,
 * so events are exercised while every function call is being traced.
 */
static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
/* Stub when the function tracer is not configured */
static __init void event_trace_self_test_with_function(void)
{
}
#endif
3345
3346static __init int event_trace_self_tests_init(void)
3347{
Li Zefan020e5f82009-07-01 10:47:05 +08003348 if (!tracing_selftest_disabled) {
3349 event_trace_self_tests();
3350 event_trace_self_test_with_function();
3351 }
Steven Rostedte6187002009-04-15 13:36:40 -04003352
3353 return 0;
3354}
3355
Steven Rostedt28d20e22009-04-20 12:12:44 -04003356late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04003357
3358#endif