blob: 9549ed120556ee75f7305e4f2dc379d470c36e37 [file] [log] [blame]
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001/*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
Steven Rostedt981d0812009-03-02 13:53:59 -05006 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
Steven Rostedtb77e38a2009-02-24 10:21:36 -05009 */
10
Fabian Frederick3448bac2014-06-07 13:43:08 +020011#define pr_fmt(fmt) fmt
12
Steven Rostedte6187002009-04-15 13:36:40 -040013#include <linux/workqueue.h>
14#include <linux/spinlock.h>
15#include <linux/kthread.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050016#include <linux/tracefs.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050017#include <linux/uaccess.h>
18#include <linux/module.h>
19#include <linux/ctype.h>
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -040020#include <linux/sort.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Steven Rostedte6187002009-04-15 13:36:40 -040022#include <linux/delay.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050023
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -040024#include <trace/events/sched.h>
25
Li Zefan020e5f82009-07-01 10:47:05 +080026#include <asm/setup.h>
27
Steven Rostedt91729ef92009-03-02 15:03:01 -050028#include "trace_output.h"
Steven Rostedtb77e38a2009-02-24 10:21:36 -050029
Steven Rostedt4e5292e2009-09-12 19:26:21 -040030#undef TRACE_SYSTEM
Steven Rostedtb628b3e2009-02-27 23:32:58 -050031#define TRACE_SYSTEM "TRACE_SYSTEM"
32
Li Zefan20c89282009-05-06 10:33:45 +080033DEFINE_MUTEX(event_mutex);
Steven Rostedt11a241a2009-03-02 11:49:04 -050034
Steven Rostedta59fd602009-04-10 13:52:20 -040035LIST_HEAD(ftrace_events);
Daniel Wagner9f616682015-08-10 14:35:46 +020036static LIST_HEAD(ftrace_generic_fields);
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080037static LIST_HEAD(ftrace_common_fields);
Steven Rostedta59fd602009-04-10 13:52:20 -040038
Steven Rostedtd1a29142013-02-27 20:23:57 -050039#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40
41static struct kmem_cache *field_cachep;
42static struct kmem_cache *file_cachep;
43
Steven Rostedt6e94a782013-06-27 10:58:31 -040044static inline int system_refcount(struct event_subsystem *system)
45{
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +020046 return system->ref_count;
Steven Rostedt6e94a782013-06-27 10:58:31 -040047}
48
49static int system_refcount_inc(struct event_subsystem *system)
50{
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +020051 return system->ref_count++;
Steven Rostedt6e94a782013-06-27 10:58:31 -040052}
53
54static int system_refcount_dec(struct event_subsystem *system)
55{
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +020056 return --system->ref_count;
Steven Rostedt6e94a782013-06-27 10:58:31 -040057}
58
/*
 * Iterate over every event file of every trace array.
 * Double loops, do not use break, only goto's work: the closing
 * brace of the outer loop lives in while_for_each_event_file(),
 * so a "break" would only leave the inner loop.
 */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

/* Same as above, but safe against removal of the current file entry. */
#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

/* Closes the outer loop opened by the do_for_each_event_file*() macros. */
#define while_for_each_event_file()		\
	}
71
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080072static struct list_head *
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -040073trace_get_fields(struct trace_event_call *event_call)
Steven Rostedt2e33af02010-04-22 10:35:55 -040074{
75 if (!event_call->class->get_fields)
76 return &event_call->class->fields;
77 return event_call->class->get_fields(event_call);
78}
79
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080080static struct ftrace_event_field *
81__find_event_field(struct list_head *head, char *name)
82{
83 struct ftrace_event_field *field;
84
85 list_for_each_entry(field, head, link) {
86 if (!strcmp(field->name, name))
87 return field;
88 }
89
90 return NULL;
91}
92
93struct ftrace_event_field *
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -040094trace_find_event_field(struct trace_event_call *call, char *name)
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080095{
96 struct ftrace_event_field *field;
97 struct list_head *head;
98
Steven Rostedt (Red Hat)e57cbaf2016-03-03 17:18:20 -050099 head = trace_get_fields(call);
100 field = __find_event_field(head, name);
101 if (field)
102 return field;
103
Daniel Wagner9f616682015-08-10 14:35:46 +0200104 field = __find_event_field(&ftrace_generic_fields, name);
105 if (field)
106 return field;
107
Steven Rostedt (Red Hat)e57cbaf2016-03-03 17:18:20 -0500108 return __find_event_field(&ftrace_common_fields, name);
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +0800109}
110
Li Zefan8728fe52010-05-24 16:22:49 +0800111static int __trace_define_field(struct list_head *head, const char *type,
112 const char *name, int offset, int size,
113 int is_signed, int filter_type)
Tom Zanussicf027f62009-03-22 03:30:39 -0500114{
115 struct ftrace_event_field *field;
116
Steven Rostedtd1a29142013-02-27 20:23:57 -0500117 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
Tom Zanussicf027f62009-03-22 03:30:39 -0500118 if (!field)
Namhyung Kimaaf6ac02013-06-07 15:07:48 +0900119 return -ENOMEM;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100120
Steven Rostedt92edca02013-02-27 20:41:37 -0500121 field->name = name;
122 field->type = type;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100123
Li Zefan43b51ea2009-08-07 10:33:22 +0800124 if (filter_type == FILTER_OTHER)
125 field->filter_type = filter_assign_type(type);
126 else
127 field->filter_type = filter_type;
128
Tom Zanussicf027f62009-03-22 03:30:39 -0500129 field->offset = offset;
130 field->size = size;
Tom Zanussia118e4d2009-04-28 03:04:53 -0500131 field->is_signed = is_signed;
Li Zefanaa38e9f2009-08-07 10:33:02 +0800132
Steven Rostedt2e33af02010-04-22 10:35:55 -0400133 list_add(&field->link, head);
Tom Zanussicf027f62009-03-22 03:30:39 -0500134
135 return 0;
Tom Zanussicf027f62009-03-22 03:30:39 -0500136}
Li Zefan8728fe52010-05-24 16:22:49 +0800137
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400138int trace_define_field(struct trace_event_call *call, const char *type,
Li Zefan8728fe52010-05-24 16:22:49 +0800139 const char *name, int offset, int size, int is_signed,
140 int filter_type)
141{
142 struct list_head *head;
143
144 if (WARN_ON(!call->class))
145 return 0;
146
147 head = trace_get_fields(call);
148 return __trace_define_field(head, type, name, offset, size,
149 is_signed, filter_type);
150}
Steven Rostedt17c873e2009-04-10 18:12:50 -0400151EXPORT_SYMBOL_GPL(trace_define_field);
Tom Zanussicf027f62009-03-22 03:30:39 -0500152
/*
 * Define a "generic" field: one that is not backed by storage in the
 * trace entry (offset/size are 0) but can still be referenced by
 * filters (e.g. "cpu", "comm"). Expands in a function that declares
 * "int ret" and returns early on error.
 */
#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;
159
/*
 * Define a common field backed by struct trace_entry member @item,
 * exposed under the "common_" prefix. Expands in a function that
 * declares "int ret" and a "struct trace_entry ent" used only for
 * offsetof/sizeof, and returns early on error.
 */
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;
168
Daniel Wagner9f616682015-08-10 14:35:46 +0200169static int trace_define_generic_fields(void)
170{
171 int ret;
172
Steven Rostedt (Red Hat)e57cbaf2016-03-03 17:18:20 -0500173 __generic_field(int, CPU, FILTER_CPU);
174 __generic_field(int, cpu, FILTER_CPU);
175 __generic_field(char *, COMM, FILTER_COMM);
176 __generic_field(char *, comm, FILTER_COMM);
Daniel Wagner9f616682015-08-10 14:35:46 +0200177
178 return ret;
179}
180
Li Zefan8728fe52010-05-24 16:22:49 +0800181static int trace_define_common_fields(void)
Li Zefane647d6b2009-08-19 15:54:32 +0800182{
183 int ret;
184 struct trace_entry ent;
185
186 __common_field(unsigned short, type);
187 __common_field(unsigned char, flags);
188 __common_field(unsigned char, preempt_count);
189 __common_field(int, pid);
Li Zefane647d6b2009-08-19 15:54:32 +0800190
191 return ret;
192}
193
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400194static void trace_destroy_fields(struct trace_event_call *call)
Li Zefan2df75e42009-05-06 10:33:04 +0800195{
196 struct ftrace_event_field *field, *next;
Steven Rostedt2e33af02010-04-22 10:35:55 -0400197 struct list_head *head;
Li Zefan2df75e42009-05-06 10:33:04 +0800198
Steven Rostedt2e33af02010-04-22 10:35:55 -0400199 head = trace_get_fields(call);
200 list_for_each_entry_safe(field, next, head, link) {
Li Zefan2df75e42009-05-06 10:33:04 +0800201 list_del(&field->link);
Steven Rostedtd1a29142013-02-27 20:23:57 -0500202 kmem_cache_free(field_cachep, field);
Li Zefan2df75e42009-05-06 10:33:04 +0800203 }
204}
205
Alexei Starovoitov32bbe002016-04-06 18:43:28 -0700206/*
207 * run-time version of trace_event_get_offsets_<call>() that returns the last
208 * accessible offset of trace fields excluding __dynamic_array bytes
209 */
210int trace_event_get_offsets(struct trace_event_call *call)
211{
212 struct ftrace_event_field *tail;
213 struct list_head *head;
214
215 head = trace_get_fields(call);
216 /*
217 * head->next points to the last field with the largest offset,
218 * since it was added last by trace_define_field()
219 */
220 tail = list_first_entry(head, struct ftrace_event_field, link);
221 return tail->offset + tail->size;
222}
223
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400224int trace_event_raw_init(struct trace_event_call *call)
Li Zefan87d9b4e2009-12-08 11:14:20 +0800225{
226 int id;
227
Steven Rostedt (Red Hat)9023c932015-05-05 09:39:12 -0400228 id = register_trace_event(&call->event);
Li Zefan87d9b4e2009-12-08 11:14:20 +0800229 if (!id)
230 return -ENODEV;
Li Zefan87d9b4e2009-12-08 11:14:20 +0800231
232 return 0;
233}
234EXPORT_SYMBOL_GPL(trace_event_raw_init);
235
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400236bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
237{
238 struct trace_array *tr = trace_file->tr;
239 struct trace_array_cpu *data;
240 struct trace_pid_list *pid_list;
241
242 pid_list = rcu_dereference_sched(tr->filtered_pids);
243 if (!pid_list)
244 return false;
245
246 data = this_cpu_ptr(tr->trace_buffer.data);
247
248 return data->ignore_pid;
249}
250EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);
251
Steven Rostedt (Red Hat)3f795dc2015-05-05 13:18:46 -0400252void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
253 struct trace_event_file *trace_file,
254 unsigned long len)
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400255{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400256 struct trace_event_call *event_call = trace_file->event_call;
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400257
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400258 if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
259 trace_event_ignore_this_pid(trace_file))
260 return NULL;
261
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400262 local_save_flags(fbuffer->flags);
263 fbuffer->pc = preempt_count();
Steven Rostedt (Red Hat)e9478412016-06-17 17:40:58 -0400264 /*
265 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
266 * preemption (adding one to the preempt_count). Since we are
267 * interested in the preempt_count at the time the tracepoint was
268 * hit, we need to subtract one to offset the increment.
269 */
270 if (IS_ENABLED(CONFIG_PREEMPT))
271 fbuffer->pc--;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400272 fbuffer->trace_file = trace_file;
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400273
274 fbuffer->event =
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400275 trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400276 event_call->event.type, len,
277 fbuffer->flags, fbuffer->pc);
278 if (!fbuffer->event)
279 return NULL;
280
281 fbuffer->entry = ring_buffer_event_data(fbuffer->event);
282 return fbuffer->entry;
283}
Steven Rostedt (Red Hat)3f795dc2015-05-05 13:18:46 -0400284EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400285
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500286static DEFINE_SPINLOCK(tracepoint_iter_lock);
287
Steven Rostedt (Red Hat)3f795dc2015-05-05 13:18:46 -0400288static void output_printk(struct trace_event_buffer *fbuffer)
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500289{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400290 struct trace_event_call *event_call;
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500291 struct trace_event *event;
292 unsigned long flags;
293 struct trace_iterator *iter = tracepoint_print_iter;
294
295 if (!iter)
296 return;
297
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400298 event_call = fbuffer->trace_file->event_call;
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500299 if (!event_call || !event_call->event.funcs ||
300 !event_call->event.funcs->trace)
301 return;
302
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400303 event = &fbuffer->trace_file->event_call->event;
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500304
305 spin_lock_irqsave(&tracepoint_iter_lock, flags);
306 trace_seq_init(&iter->seq);
307 iter->ent = fbuffer->entry;
308 event_call->event.funcs->trace(iter, 0, event);
309 trace_seq_putc(&iter->seq, 0);
310 printk("%s", iter->seq.buffer);
311
312 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
313}
314
Steven Rostedt (Red Hat)3f795dc2015-05-05 13:18:46 -0400315void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400316{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500317 if (tracepoint_printk)
318 output_printk(fbuffer);
319
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400320 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400321 fbuffer->event, fbuffer->entry,
322 fbuffer->flags, fbuffer->pc);
323}
Steven Rostedt (Red Hat)3f795dc2015-05-05 13:18:46 -0400324EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400325
/*
 * Default ->reg() implementation for tracepoint-backed events:
 * attach/detach the class probe (or perf probe) to the tracepoint.
 * @data is the trace_event_file for the ftrace cases; the perf cases
 * pass the call itself as probe data. Returns 0 or the probe
 * registration error.
 */
int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	/* This path only makes sense for real tracepoint events. */
	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	/* open/close/add/del need no tracepoint work here. */
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400363
Li Zefane870e9a2010-07-02 11:07:32 +0800364void trace_event_enable_cmd_record(bool enable)
365{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400366 struct trace_event_file *file;
Steven Rostedtae63b312012-05-03 23:09:03 -0400367 struct trace_array *tr;
Li Zefane870e9a2010-07-02 11:07:32 +0800368
369 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400370 do_for_each_event_file(tr, file) {
371
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400372 if (!(file->flags & EVENT_FILE_FL_ENABLED))
Li Zefane870e9a2010-07-02 11:07:32 +0800373 continue;
374
375 if (enable) {
376 tracing_start_cmdline_record();
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400377 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
Li Zefane870e9a2010-07-02 11:07:32 +0800378 } else {
379 tracing_stop_cmdline_record();
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400380 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
Li Zefane870e9a2010-07-02 11:07:32 +0800381 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400382 } while_for_each_event_file();
Li Zefane870e9a2010-07-02 11:07:32 +0800383 mutex_unlock(&event_mutex);
384}
385
/*
 * Core enable/disable state machine for one event file.
 *
 * @enable:	 1 to enable the event, 0 to disable it.
 * @soft_disable: non-zero when the request comes from a "soft" user
 *		 (e.g. triggers) that wants the tracepoint registered
 *		 but the event output suppressed; such users are
 *		 counted in file->sm_ref.
 *
 * Returns 0 on success or the error from registering the tracepoint.
 * Caller must hold event_mutex.
 */
static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	/* Snapshot of the flags, to detect a SOFT_DISABLED transition below. */
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}
485
/* Public entry point: forward to the internal enable/disable engine. */
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}
491
/* Hard (non-soft) enable/disable of an event file. */
static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, /* soft_disable */ 0);
}
497
Steven Rostedtae63b312012-05-03 23:09:03 -0400498static void ftrace_clear_events(struct trace_array *tr)
Zhaolei0e907c92009-05-25 18:13:59 +0800499{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400500 struct trace_event_file *file;
Zhaolei0e907c92009-05-25 18:13:59 +0800501
502 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400503 list_for_each_entry(file, &tr->events, list) {
504 ftrace_event_enable_disable(file, 0);
Zhaolei0e907c92009-05-25 18:13:59 +0800505 }
506 mutex_unlock(&event_mutex);
507}
508
Steven Rostedtc37775d2016-04-13 16:59:18 -0400509static void
510event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
511{
512 struct trace_pid_list *pid_list;
513 struct trace_array *tr = data;
514
515 pid_list = rcu_dereference_sched(tr->filtered_pids);
Steven Rostedt4e267db2016-04-14 07:38:13 -0400516 trace_filter_add_remove_task(pid_list, NULL, task);
Steven Rostedtc37775d2016-04-13 16:59:18 -0400517}
518
519static void
520event_filter_pid_sched_process_fork(void *data,
521 struct task_struct *self,
522 struct task_struct *task)
523{
524 struct trace_pid_list *pid_list;
525 struct trace_array *tr = data;
526
527 pid_list = rcu_dereference_sched(tr->filtered_pids);
Steven Rostedt4e267db2016-04-14 07:38:13 -0400528 trace_filter_add_remove_task(pid_list, self, task);
Steven Rostedtc37775d2016-04-13 16:59:18 -0400529}
530
531void trace_event_follow_fork(struct trace_array *tr, bool enable)
532{
533 if (enable) {
534 register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
535 tr, INT_MIN);
536 register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit,
537 tr, INT_MAX);
538 } else {
539 unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
540 tr);
541 unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit,
542 tr);
543 }
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400544}
545
546static void
Linus Torvalds22402cd2015-11-06 13:30:20 -0800547event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400548 struct task_struct *prev, struct task_struct *next)
549{
550 struct trace_array *tr = data;
551 struct trace_pid_list *pid_list;
552
553 pid_list = rcu_dereference_sched(tr->filtered_pids);
554
555 this_cpu_write(tr->trace_buffer.data->ignore_pid,
Steven Rostedt4e267db2016-04-14 07:38:13 -0400556 trace_ignore_this_task(pid_list, prev) &&
557 trace_ignore_this_task(pid_list, next));
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400558}
559
560static void
Linus Torvalds22402cd2015-11-06 13:30:20 -0800561event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400562 struct task_struct *prev, struct task_struct *next)
563{
564 struct trace_array *tr = data;
565 struct trace_pid_list *pid_list;
566
567 pid_list = rcu_dereference_sched(tr->filtered_pids);
568
569 this_cpu_write(tr->trace_buffer.data->ignore_pid,
Steven Rostedt4e267db2016-04-14 07:38:13 -0400570 trace_ignore_this_task(pid_list, next));
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400571}
572
573static void
574event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
575{
576 struct trace_array *tr = data;
577 struct trace_pid_list *pid_list;
578
579 /* Nothing to do if we are already tracing */
580 if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
581 return;
582
583 pid_list = rcu_dereference_sched(tr->filtered_pids);
584
585 this_cpu_write(tr->trace_buffer.data->ignore_pid,
Steven Rostedt4e267db2016-04-14 07:38:13 -0400586 trace_ignore_this_task(pid_list, task));
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400587}
588
589static void
590event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
591{
592 struct trace_array *tr = data;
593 struct trace_pid_list *pid_list;
594
595 /* Nothing to do if we are not tracing */
596 if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
597 return;
598
599 pid_list = rcu_dereference_sched(tr->filtered_pids);
600
601 /* Set tracing if current is enabled */
602 this_cpu_write(tr->trace_buffer.data->ignore_pid,
Steven Rostedt4e267db2016-04-14 07:38:13 -0400603 trace_ignore_this_task(pid_list, current));
Steven Rostedt (Red Hat)3fdaf802015-09-25 12:58:44 -0400604}
605
/*
 * Tear down pid filtering for @tr: unhook all sched probes, clear the
 * per-file PID_FILTER bits and per-cpu ignore flags, publish a NULL
 * pid list, and only then free the old list after an RCU-sched grace
 * period. Caller must hold event_mutex. The ordering here matters:
 * readers must be unable to see the list before it is freed.
 */
static void __ftrace_clear_event_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	if (!pid_list)
		return;

	/* Detach every probe pair that maintained the per-cpu ignore flag. */
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

	list_for_each_entry(file, &tr->events, list) {
		clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

	rcu_assign_pointer(tr->filtered_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_sched();

	trace_free_pid_list(pid_list);
}
643
644static void ftrace_clear_event_pids(struct trace_array *tr)
645{
646 mutex_lock(&event_mutex);
647 __ftrace_clear_event_pids(tr);
648 mutex_unlock(&event_mutex);
649}
650
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400651static void __put_system(struct event_subsystem *system)
652{
653 struct event_filter *filter = system->filter;
654
Steven Rostedt6e94a782013-06-27 10:58:31 -0400655 WARN_ON_ONCE(system_refcount(system) == 0);
656 if (system_refcount_dec(system))
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400657 return;
658
Steven Rostedtae63b312012-05-03 23:09:03 -0400659 list_del(&system->list);
660
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400661 if (filter) {
662 kfree(filter->filter_string);
663 kfree(filter);
664 }
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +0200665 kfree_const(system->name);
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400666 kfree(system);
667}
668
/* Take a reference on @system; must not be called on a dead subsystem. */
static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}
674
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400675static void __get_system_dir(struct trace_subsystem_dir *dir)
Steven Rostedtae63b312012-05-03 23:09:03 -0400676{
677 WARN_ON_ONCE(dir->ref_count == 0);
678 dir->ref_count++;
679 __get_system(dir->subsystem);
680}
681
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400682static void __put_system_dir(struct trace_subsystem_dir *dir)
Steven Rostedtae63b312012-05-03 23:09:03 -0400683{
684 WARN_ON_ONCE(dir->ref_count == 0);
685 /* If the subsystem is about to be freed, the dir must be too */
Steven Rostedt6e94a782013-06-27 10:58:31 -0400686 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
Steven Rostedtae63b312012-05-03 23:09:03 -0400687
688 __put_system(dir->subsystem);
689 if (!--dir->ref_count)
690 kfree(dir);
691}
692
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400693static void put_system(struct trace_subsystem_dir *dir)
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400694{
695 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400696 __put_system_dir(dir);
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400697 mutex_unlock(&event_mutex);
698}
699
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -0400700static void remove_subsystem(struct trace_subsystem_dir *dir)
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200701{
702 if (!dir)
703 return;
704
705 if (!--dir->nr_events) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -0500706 tracefs_remove_recursive(dir->entry);
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200707 list_del(&dir->list);
708 __put_system_dir(dir);
709 }
710}
711
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400712static void remove_event_file_dir(struct trace_event_file *file)
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200713{
Oleg Nesterovbf682c32013-07-28 20:35:27 +0200714 struct dentry *dir = file->dir;
715 struct dentry *child;
716
717 if (dir) {
718 spin_lock(&dir->d_lock); /* probably unneeded */
Al Viro946e51f2014-10-26 19:19:16 -0400719 list_for_each_entry(child, &dir->d_subdirs, d_child) {
David Howells7682c912015-03-17 22:26:16 +0000720 if (d_really_is_positive(child)) /* probably unneeded */
721 d_inode(child)->i_private = NULL;
Oleg Nesterovbf682c32013-07-28 20:35:27 +0200722 }
723 spin_unlock(&dir->d_lock);
724
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -0500725 tracefs_remove_recursive(dir);
Oleg Nesterovbf682c32013-07-28 20:35:27 +0200726 }
727
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200728 list_del(&file->list);
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200729 remove_subsystem(file->system);
Oleg Nesterov2448e342014-07-11 21:06:38 +0200730 free_event_filter(file->filter);
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200731 kmem_cache_free(file_cachep, file);
732}
733
Li Zefan8f31bfe2009-05-08 10:31:42 +0800734/*
735 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
736 */
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400737static int
738__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
739 const char *sub, const char *event, int set)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500740{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400741 struct trace_event_file *file;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400742 struct trace_event_call *call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400743 const char *name;
Steven Rostedt29f93942009-05-08 16:06:47 -0400744 int ret = -EINVAL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500745
Steven Rostedtae63b312012-05-03 23:09:03 -0400746 list_for_each_entry(file, &tr->events, list) {
747
748 call = file->event_call;
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400749 name = trace_event_name(call);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500750
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400751 if (!name || !call->class || !call->class->reg)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500752 continue;
Steven Rostedt1473e442009-02-24 14:15:08 -0500753
Steven Rostedt9b637762012-05-10 15:55:43 -0400754 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
755 continue;
756
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500757 if (match &&
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400758 strcmp(match, name) != 0 &&
Steven Rostedt8f082012010-04-20 10:47:33 -0400759 strcmp(match, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500760 continue;
761
Steven Rostedt8f082012010-04-20 10:47:33 -0400762 if (sub && strcmp(sub, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500763 continue;
764
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400765 if (event && strcmp(event, name) != 0)
Steven Rostedt1473e442009-02-24 14:15:08 -0500766 continue;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500767
Steven Rostedtae63b312012-05-03 23:09:03 -0400768 ftrace_event_enable_disable(file, set);
Steven Rostedtfd994982009-02-28 02:41:25 -0500769
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500770 ret = 0;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500771 }
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400772
773 return ret;
774}
775
/* Same as the _nolock variant, but takes event_mutex around the walk */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
787
Steven Rostedtae63b312012-05-03 23:09:03 -0400788static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
Li Zefan8f31bfe2009-05-08 10:31:42 +0800789{
790 char *event = NULL, *sub = NULL, *match;
Joonsoo Kim84fce9d2015-04-16 13:44:44 +0900791 int ret;
Li Zefan8f31bfe2009-05-08 10:31:42 +0800792
793 /*
794 * The buf format can be <subsystem>:<event-name>
795 * *:<event-name> means any event by that name.
796 * :<event-name> is the same.
797 *
798 * <subsystem>:* means all events in that subsystem
799 * <subsystem>: means the same.
800 *
801 * <name> (no ':') means all events in a subsystem with
802 * the name <name> or any event that matches <name>
803 */
804
805 match = strsep(&buf, ":");
806 if (buf) {
807 sub = match;
808 event = buf;
809 match = NULL;
810
811 if (!strlen(sub) || strcmp(sub, "*") == 0)
812 sub = NULL;
813 if (!strlen(event) || strcmp(event, "*") == 0)
814 event = NULL;
815 }
816
Joonsoo Kim84fce9d2015-04-16 13:44:44 +0900817 ret = __ftrace_set_clr_event(tr, match, sub, event, set);
818
819 /* Put back the colon to allow this to be called again */
820 if (buf)
821 *(buf - 1) = ':';
822
823 return ret;
Li Zefan8f31bfe2009-05-08 10:31:42 +0800824}
825
Steven Rostedt4671c792009-05-08 16:27:41 -0400826/**
827 * trace_set_clr_event - enable or disable an event
828 * @system: system name to match (NULL for any system)
829 * @event: event name to match (NULL for all events, within system)
830 * @set: 1 to enable, 0 to disable
831 *
832 * This is a way for other parts of the kernel to enable or disable
833 * event recording.
834 *
835 * Returns 0 on success, -EINVAL if the parameters do not match any
836 * registered events.
837 */
838int trace_set_clr_event(const char *system, const char *event, int set)
839{
Steven Rostedtae63b312012-05-03 23:09:03 -0400840 struct trace_array *tr = top_trace_array();
841
Yoshihiro YUNOMAEdc81e5e2014-06-06 07:35:17 +0900842 if (!tr)
843 return -ENODEV;
844
Steven Rostedtae63b312012-05-03 23:09:03 -0400845 return __ftrace_set_clr_event(tr, NULL, system, event, set);
Steven Rostedt4671c792009-05-08 16:27:41 -0400846}
Yuanhan Liu56355b82010-11-08 14:05:12 +0800847EXPORT_SYMBOL_GPL(trace_set_clr_event);
Steven Rostedt4671c792009-05-08 16:27:41 -0400848
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500849/* 128 should be much more than enough */
850#define EVENT_BUF_SIZE 127
851
852static ssize_t
853ftrace_event_write(struct file *file, const char __user *ubuf,
854 size_t cnt, loff_t *ppos)
855{
jolsa@redhat.com48966362009-09-11 17:29:28 +0200856 struct trace_parser parser;
Steven Rostedtae63b312012-05-03 23:09:03 -0400857 struct seq_file *m = file->private_data;
858 struct trace_array *tr = m->private;
Li Zefan4ba79782009-09-22 13:52:20 +0800859 ssize_t read, ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500860
Li Zefan4ba79782009-09-22 13:52:20 +0800861 if (!cnt)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500862 return 0;
863
Steven Rostedt1852fcc2009-03-11 14:33:00 -0400864 ret = tracing_update_buffers();
865 if (ret < 0)
866 return ret;
867
jolsa@redhat.com48966362009-09-11 17:29:28 +0200868 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500869 return -ENOMEM;
870
jolsa@redhat.com48966362009-09-11 17:29:28 +0200871 read = trace_get_user(&parser, ubuf, cnt, ppos);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500872
Li Zefan4ba79782009-09-22 13:52:20 +0800873 if (read >= 0 && trace_parser_loaded((&parser))) {
jolsa@redhat.com48966362009-09-11 17:29:28 +0200874 int set = 1;
875
876 if (*parser.buffer == '!')
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500877 set = 0;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500878
jolsa@redhat.com48966362009-09-11 17:29:28 +0200879 parser.buffer[parser.idx] = 0;
880
Steven Rostedtae63b312012-05-03 23:09:03 -0400881 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500882 if (ret)
jolsa@redhat.com48966362009-09-11 17:29:28 +0200883 goto out_put;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500884 }
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500885
886 ret = read;
887
jolsa@redhat.com48966362009-09-11 17:29:28 +0200888 out_put:
889 trace_parser_put(&parser);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500890
891 return ret;
892}
893
/*
 * seq_file ->next for available_events: advance to the next event
 * file that can actually be enabled/disabled.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}
916
/* seq_file ->start for available_events; the lock drops in t_stop() */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	/* Start at the list head itself; t_next() steps past it */
	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}
933
934static void *
935s_next(struct seq_file *m, void *v, loff_t *pos)
936{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400937 struct trace_event_file *file = v;
Steven Rostedtae63b312012-05-03 23:09:03 -0400938 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500939
940 (*pos)++;
941
Steven Rostedtae63b312012-05-03 23:09:03 -0400942 list_for_each_entry_continue(file, &tr->events, list) {
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400943 if (file->flags & EVENT_FILE_FL_ENABLED)
Steven Rostedtae63b312012-05-03 23:09:03 -0400944 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500945 }
946
Li Zefan30bd39c2009-09-18 14:07:05 +0800947 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500948}
949
/* seq_file ->start for set_event; the lock drops in t_stop() */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	/* Start at the list head itself; s_next() steps past it */
	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}
966
967static int t_show(struct seq_file *m, void *v)
968{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400969 struct trace_event_file *file = v;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400970 struct trace_event_call *call = file->event_call;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500971
Steven Rostedt8f082012010-04-20 10:47:33 -0400972 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
973 seq_printf(m, "%s:", call->class->system);
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -0400974 seq_printf(m, "%s\n", trace_event_name(call));
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500975
976 return 0;
977}
978
/* Pairs with the mutex_lock() taken in t_start()/s_start() */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
983
Steven Rostedtf4d34a82016-04-13 16:27:49 -0400984static void *
985p_next(struct seq_file *m, void *v, loff_t *pos)
986{
987 struct trace_array *tr = m->private;
988 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
Steven Rostedtf4d34a82016-04-13 16:27:49 -0400989
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400990 return trace_pid_next(pid_list, v, pos);
Steven Rostedtf4d34a82016-04-13 16:27:49 -0400991}
992
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -0400993static void *p_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)fb662282015-10-26 03:45:22 -0400994 __acquires(RCU)
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -0400995{
996 struct trace_pid_list *pid_list;
997 struct trace_array *tr = m->private;
998
999 /*
1000 * Grab the mutex, to keep calls to p_next() having the same
1001 * tr->filtered_pids as p_start() has.
1002 * If we just passed the tr->filtered_pids around, then RCU would
1003 * have been enough, but doing that makes things more complex.
1004 */
1005 mutex_lock(&event_mutex);
1006 rcu_read_lock_sched();
1007
1008 pid_list = rcu_dereference_sched(tr->filtered_pids);
1009
Steven Rostedtf4d34a82016-04-13 16:27:49 -04001010 if (!pid_list)
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04001011 return NULL;
1012
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -04001013 return trace_pid_start(pid_list, pos);
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04001014}
1015
/* Undo p_start(): leave the RCU read section and drop event_mutex */
static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}
1022
Steven Rostedt1473e442009-02-24 14:15:08 -05001023static ssize_t
1024event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1025 loff_t *ppos)
1026{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001027 struct trace_event_file *file;
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +02001028 unsigned long flags;
Tom Zanussia4390592013-06-29 00:08:04 -05001029 char buf[4] = "0";
Steven Rostedt1473e442009-02-24 14:15:08 -05001030
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +02001031 mutex_lock(&event_mutex);
1032 file = event_file_data(filp);
1033 if (likely(file))
1034 flags = file->flags;
1035 mutex_unlock(&event_mutex);
1036
1037 if (!file)
1038 return -ENODEV;
1039
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001040 if (flags & EVENT_FILE_FL_ENABLED &&
1041 !(flags & EVENT_FILE_FL_SOFT_DISABLED))
Tom Zanussia4390592013-06-29 00:08:04 -05001042 strcpy(buf, "1");
1043
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001044 if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
1045 flags & EVENT_FILE_FL_SOFT_MODE)
Tom Zanussia4390592013-06-29 00:08:04 -05001046 strcat(buf, "*");
1047
1048 strcat(buf, "\n");
Steven Rostedt1473e442009-02-24 14:15:08 -05001049
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -04001050 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
Steven Rostedt1473e442009-02-24 14:15:08 -05001051}
1052
/*
 * Write handler for an event's "enable" file: accepts "0" or "1".
 * Returns bytes consumed on success, -ENODEV if the event was removed,
 * -EINVAL for any other value.
 */
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		/* Re-check the file under the lock; it may have been removed */
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
1088
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001089static ssize_t
1090system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1091 loff_t *ppos)
1092{
Li Zefanc142b152009-05-08 10:32:05 +08001093 const char set_to_char[4] = { '?', '0', '1', 'X' };
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001094 struct trace_subsystem_dir *dir = filp->private_data;
Steven Rostedtae63b312012-05-03 23:09:03 -04001095 struct event_subsystem *system = dir->subsystem;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001096 struct trace_event_call *call;
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001097 struct trace_event_file *file;
Steven Rostedtae63b312012-05-03 23:09:03 -04001098 struct trace_array *tr = dir->tr;
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001099 char buf[2];
Li Zefanc142b152009-05-08 10:32:05 +08001100 int set = 0;
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001101 int ret;
1102
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001103 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001104 list_for_each_entry(file, &tr->events, list) {
1105 call = file->event_call;
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04001106 if (!trace_event_name(call) || !call->class || !call->class->reg)
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001107 continue;
1108
Steven Rostedt40ee4df2011-07-05 14:32:51 -04001109 if (system && strcmp(call->class->system, system->name) != 0)
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001110 continue;
1111
1112 /*
1113 * We need to find out if all the events are set
1114 * or if all events or cleared, or if we have
1115 * a mixture.
1116 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001117 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));
Li Zefanc142b152009-05-08 10:32:05 +08001118
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001119 /*
1120 * If we have a mixture, no need to look further.
1121 */
Li Zefanc142b152009-05-08 10:32:05 +08001122 if (set == 3)
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001123 break;
1124 }
1125 mutex_unlock(&event_mutex);
1126
Li Zefanc142b152009-05-08 10:32:05 +08001127 buf[0] = set_to_char[set];
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001128 buf[1] = '\n';
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001129
1130 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
1131
1132 return ret;
1133}
1134
/*
 * Write '0' or '1' to a subsystem "enable" file to disable/enable every
 * event in that subsystem (all events when this is the top-level file
 * and system is NULL).
 */
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
1174
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001175enum {
1176 FORMAT_HEADER = 1,
Li Zefan86397dc2010-08-17 13:53:06 +08001177 FORMAT_FIELD_SEPERATOR = 2,
1178 FORMAT_PRINTFMT = 3,
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001179};
1180
/*
 * seq_file ->next for an event's "format" file.  The sequence is:
 * FORMAT_HEADER, the common fields, FORMAT_FIELD_SEPERATOR, the
 * event's own fields, then FORMAT_PRINTFMT.  The field lists are
 * walked via ->prev (presumably the fields were list_add()'ed in
 * declaration order, so this yields that order back).
 */
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		/* Header done: start on the common fields */
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		/* Separator done: start on the event's own fields */
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}
1212
/*
 * seq_file ->show for an event's "format" file: prints the header,
 * the field separator blank line, the print fmt line, or one field
 * description, depending on the iterator cookie in @v.
 */
static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	/* Not a cookie: @v is a field list node */
	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type(except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	/* __data_loc types carry a '[' but are not fixed-size arrays */
	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
1262
Oleg Nesterov7710b632013-07-18 20:47:10 +02001263static void *f_start(struct seq_file *m, loff_t *pos)
1264{
1265 void *p = (void *)FORMAT_HEADER;
1266 loff_t l = 0;
1267
Oleg Nesterovc5a44a12013-07-26 19:25:43 +02001268 /* ->stop() is called even if ->start() fails */
1269 mutex_lock(&event_mutex);
1270 if (!event_file_data(m->private))
1271 return ERR_PTR(-ENODEV);
1272
Oleg Nesterov7710b632013-07-18 20:47:10 +02001273 while (l < *pos && p)
1274 p = f_next(m, p, &l);
1275
1276 return p;
1277}
1278
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001279static void f_stop(struct seq_file *m, void *p)
1280{
Oleg Nesterovc5a44a12013-07-26 19:25:43 +02001281 mutex_unlock(&event_mutex);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001282}
1283
/* seq_file operations for an event's "format" file */
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
1290
/*
 * Open an event's "format" file.  The struct file is stashed in the
 * seq_file's private data so the iterators can reach the event via
 * event_file_data().
 */
static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}
1305
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001306static ssize_t
1307event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1308{
Oleg Nesterov1a111262013-07-26 19:25:32 +02001309 int id = (long)event_file_data(filp);
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001310 char buf[32];
1311 int len;
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001312
1313 if (*ppos)
1314 return 0;
1315
Oleg Nesterov1a111262013-07-26 19:25:32 +02001316 if (unlikely(!id))
1317 return -ENODEV;
1318
1319 len = sprintf(buf, "%d\n", id);
1320
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001321 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001322}
1323
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001324static ssize_t
1325event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1326 loff_t *ppos)
1327{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001328 struct trace_event_file *file;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001329 struct trace_seq *s;
Oleg Nesterove2912b02013-07-26 19:25:40 +02001330 int r = -ENODEV;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001331
1332 if (*ppos)
1333 return 0;
1334
1335 s = kmalloc(sizeof(*s), GFP_KERNEL);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001336
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001337 if (!s)
1338 return -ENOMEM;
1339
1340 trace_seq_init(s);
1341
Oleg Nesterove2912b02013-07-26 19:25:40 +02001342 mutex_lock(&event_mutex);
Tom Zanussif306cc82013-10-24 08:34:17 -05001343 file = event_file_data(filp);
1344 if (file)
1345 print_event_filter(file, s);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001346 mutex_unlock(&event_mutex);
1347
Tom Zanussif306cc82013-10-24 08:34:17 -05001348 if (file)
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001349 r = simple_read_from_buffer(ubuf, cnt, ppos,
1350 s->buffer, trace_seq_used(s));
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001351
1352 kfree(s);
1353
1354 return r;
1355}
1356
/*
 * Write an event's "filter" file: copy the user string and hand it to
 * apply_event_filter() under event_mutex.  Returns bytes consumed, or
 * a negative errno (-ENODEV if the event has been removed).
 */
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
1386
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001387static LIST_HEAD(event_subsystems);
1388
/*
 * Open a subsystem file (e.g. its "enable" or "filter").  Verifies the
 * subsystem dir still exists by searching all trace arrays under both
 * trace_types_lock and event_mutex, then takes references on the dir
 * and the trace array so neither can go away while the file is open.
 */
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Undo both references taken above */
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}
1438
/*
 * Open handler for the top-level "enable" file of a trace_array, which
 * acts on ALL subsystems at once.
 *
 * Allocates a temporary trace_subsystem_dir with a NULL ->subsystem (the
 * marker subsystem_release() uses to know it must kfree() instead of
 * put_system()) and stashes it in filp->private_data. Also pins the
 * trace_array for the lifetime of the descriptor.
 */
static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Roll back the reference and allocation on failure */
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}
1471
1472static int subsystem_release(struct inode *inode, struct file *file)
1473{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001474 struct trace_subsystem_dir *dir = file->private_data;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001475
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001476 trace_array_put(dir->tr);
1477
Steven Rostedtae63b312012-05-03 23:09:03 -04001478 /*
1479 * If dir->subsystem is NULL, then this is a temporary
1480 * descriptor that was made for a trace_array to enable
1481 * all subsystems.
1482 */
1483 if (dir->subsystem)
1484 put_system(dir);
1485 else
1486 kfree(dir);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001487
1488 return 0;
1489}
1490
Tom Zanussicfb180f2009-03-22 03:31:17 -05001491static ssize_t
1492subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1493 loff_t *ppos)
1494{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001495 struct trace_subsystem_dir *dir = filp->private_data;
Steven Rostedtae63b312012-05-03 23:09:03 -04001496 struct event_subsystem *system = dir->subsystem;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001497 struct trace_seq *s;
1498 int r;
1499
1500 if (*ppos)
1501 return 0;
1502
1503 s = kmalloc(sizeof(*s), GFP_KERNEL);
1504 if (!s)
1505 return -ENOMEM;
1506
1507 trace_seq_init(s);
1508
Tom Zanussi8b372562009-04-28 03:04:59 -05001509 print_subsystem_event_filter(system, s);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001510 r = simple_read_from_buffer(ubuf, cnt, ppos,
1511 s->buffer, trace_seq_used(s));
Tom Zanussicfb180f2009-03-22 03:31:17 -05001512
1513 kfree(s);
1514
1515 return r;
1516}
1517
1518static ssize_t
1519subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1520 loff_t *ppos)
1521{
Steven Rostedt (Red Hat)7967b3e2015-05-13 14:59:40 -04001522 struct trace_subsystem_dir *dir = filp->private_data;
Tom Zanussi8b372562009-04-28 03:04:59 -05001523 char *buf;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001524 int err;
1525
Tom Zanussi8b372562009-04-28 03:04:59 -05001526 if (cnt >= PAGE_SIZE)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001527 return -EINVAL;
1528
Al Viro70f6cbb2015-12-24 00:13:10 -05001529 buf = memdup_user_nul(ubuf, cnt);
1530 if (IS_ERR(buf))
1531 return PTR_ERR(buf);
Tom Zanussicfb180f2009-03-22 03:31:17 -05001532
Steven Rostedtae63b312012-05-03 23:09:03 -04001533 err = apply_subsystem_event_filter(dir, buf);
Al Viro70f6cbb2015-12-24 00:13:10 -05001534 kfree(buf);
Tom Zanussi8b372562009-04-28 03:04:59 -05001535 if (err < 0)
Li Zefan44e9c8b2009-04-11 15:55:28 +08001536 return err;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001537
1538 *ppos += cnt;
1539
1540 return cnt;
1541}
1542
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001543static ssize_t
1544show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1545{
1546 int (*func)(struct trace_seq *s) = filp->private_data;
1547 struct trace_seq *s;
1548 int r;
1549
1550 if (*ppos)
1551 return 0;
1552
1553 s = kmalloc(sizeof(*s), GFP_KERNEL);
1554 if (!s)
1555 return -ENOMEM;
1556
1557 trace_seq_init(s);
1558
1559 func(s);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001560 r = simple_read_from_buffer(ubuf, cnt, ppos,
1561 s->buffer, trace_seq_used(s));
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001562
1563 kfree(s);
1564
1565 return r;
1566}
1567
/*
 * Per-CPU callback (run via on_each_cpu() from ftrace_event_pid_write()):
 * recompute this CPU's ignore_pid flag for the task currently running on
 * it, against the trace_array's filtered-pid list.
 */
static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->trace_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
1583
/*
 * Write handler for "set_event_pid": install a new PID filter list for
 * the trace_array.
 *
 * The new list is built from the user buffer (merged with the old list
 * unless the file was truncated at open), published with RCU, and the
 * old list freed after a grace period. When a filter is installed for
 * the first time, sched_switch/wakeup probes are registered to keep the
 * per-CPU ignore_pid state up to date on context switches.
 *
 * Returns bytes consumed, 0 for an empty write, or a negative errno.
 */
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* event_mutex protects tr->filtered_pids against concurrent writers */
	mutex_lock(&event_mutex);

	filtered_pids = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	/* Parse the user buffer into a new pid list (merging the old one) */
	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->filtered_pids, pid_list);

	/* Every event file of this trace_array must now honor the filter */
	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		/* Wait for readers of the old list before freeing it */
		synchronize_sched();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/*
		 * Register a probe that is called before all other probes
		 * to set ignore_pid if next or prev do not match.
		 * Register a probe this is called after all other probes
		 * to only keep ignore_pid set if next pid matches.
		 */
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);

		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
						     tr, INT_MAX);
		register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
						     tr, 0);

		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
						 tr, INT_MAX);
		register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
						 tr, 0);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}
1663
/* Forward declarations for the file_operations tables below */
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001668
/* seq_file iterator for "available_events": walks every registered event */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* seq_file iterator for "set_event": walks only the enabled events */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

/* seq_file iterator for "set_event_pid": walks the filtered PID list */
static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};
1689
/* "available_events": read-only listing of all events */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* "set_event": read the enabled events, write to enable/disable them */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

/* "set_event_pid": read/update the PID filter of a trace_array */
static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

/* Per-event "enable" file */
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

/* Per-event "format" file (read-only) */
static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Per-event "id" file (read-only) */
static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

/* Per-event "filter" file */
static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

/* Per-subsystem "filter" file */
static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Per-subsystem "enable" file */
static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Top-level "enable" file of a trace_array (acts on all subsystems) */
static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* "header_page" / "header_event" files (read-only) */
static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
1768
Steven Rostedtae63b312012-05-03 23:09:03 -04001769static int
1770ftrace_event_open(struct inode *inode, struct file *file,
1771 const struct seq_operations *seq_ops)
Steven Rostedt1473e442009-02-24 14:15:08 -05001772{
Steven Rostedtae63b312012-05-03 23:09:03 -04001773 struct seq_file *m;
1774 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001775
Steven Rostedtae63b312012-05-03 23:09:03 -04001776 ret = seq_open(file, seq_ops);
1777 if (ret < 0)
1778 return ret;
1779 m = file->private_data;
1780 /* copy tr over to seq ops */
1781 m->private = inode->i_private;
Steven Rostedt1473e442009-02-24 14:15:08 -05001782
Steven Rostedtae63b312012-05-03 23:09:03 -04001783 return ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001784}
1785
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001786static int ftrace_event_release(struct inode *inode, struct file *file)
1787{
1788 struct trace_array *tr = inode->i_private;
1789
1790 trace_array_put(tr);
1791
1792 return seq_release(inode, file);
1793}
1794
Steven Rostedt15075ca2012-05-03 14:57:28 -04001795static int
1796ftrace_event_avail_open(struct inode *inode, struct file *file)
1797{
1798 const struct seq_operations *seq_ops = &show_event_seq_ops;
1799
Steven Rostedtae63b312012-05-03 23:09:03 -04001800 return ftrace_event_open(inode, file, seq_ops);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001801}
1802
1803static int
1804ftrace_event_set_open(struct inode *inode, struct file *file)
1805{
1806 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
Steven Rostedtae63b312012-05-03 23:09:03 -04001807 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001808 int ret;
1809
1810 if (trace_array_get(tr) < 0)
1811 return -ENODEV;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001812
1813 if ((file->f_mode & FMODE_WRITE) &&
1814 (file->f_flags & O_TRUNC))
Steven Rostedtae63b312012-05-03 23:09:03 -04001815 ftrace_clear_events(tr);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001816
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001817 ret = ftrace_event_open(inode, file, seq_ops);
1818 if (ret < 0)
1819 trace_array_put(tr);
1820 return ret;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001821}
1822
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04001823static int
1824ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1825{
1826 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1827 struct trace_array *tr = inode->i_private;
1828 int ret;
1829
1830 if (trace_array_get(tr) < 0)
1831 return -ENODEV;
1832
1833 if ((file->f_mode & FMODE_WRITE) &&
1834 (file->f_flags & O_TRUNC))
1835 ftrace_clear_event_pids(tr);
1836
1837 ret = ftrace_event_open(inode, file, seq_ops);
1838 if (ret < 0)
1839 trace_array_put(tr);
1840 return ret;
1841}
1842
Steven Rostedtae63b312012-05-03 23:09:03 -04001843static struct event_subsystem *
1844create_new_subsystem(const char *name)
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001845{
1846 struct event_subsystem *system;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001847
1848 /* need to create new entry */
1849 system = kmalloc(sizeof(*system), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001850 if (!system)
1851 return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001852
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001853 system->ref_count = 1;
Steven Rostedt6e94a782013-06-27 10:58:31 -04001854
1855 /* Only allocate if dynamic (kprobes and modules) */
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +02001856 system->name = kstrdup_const(name, GFP_KERNEL);
1857 if (!system->name)
1858 goto out_free;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001859
Tom Zanussi30e673b2009-04-28 03:04:47 -05001860 system->filter = NULL;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001861
Tom Zanussi8b372562009-04-28 03:04:59 -05001862 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001863 if (!system->filter)
1864 goto out_free;
1865
1866 list_add(&system->list, &event_subsystems);
1867
1868 return system;
1869
1870 out_free:
Rasmus Villemoes79ac6ef2015-09-09 23:24:01 +02001871 kfree_const(system->name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001872 kfree(system);
1873 return NULL;
1874}
1875
/*
 * Find or create the tracefs directory for an event's subsystem within
 * a trace_array, and bind @file to it.
 *
 * If the trace_array already has a dir for this subsystem it is reused
 * (bumping its event count). Otherwise a new trace_subsystem_dir is
 * allocated, attached to an existing global event_subsystem (taking a
 * reference) or to a freshly created one, and populated with "filter"
 * and "enable" control files.
 *
 * Returns the subsystem's dentry, or NULL on failure.
 */
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct trace_event_file *file, struct dentry *parent)
{
	struct trace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = tracefs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warn("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	/*
	 * A failed "filter" file is not fatal: the subsystem stays usable,
	 * but its (unreachable) filter is dropped.
	 */
	entry = tracefs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warn("Could not create tracefs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
	return NULL;
}
1950
Steven Rostedt1473e442009-02-24 14:15:08 -05001951static int
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001952event_create_dir(struct dentry *parent, struct trace_event_file *file)
Steven Rostedt1473e442009-02-24 14:15:08 -05001953{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001954 struct trace_event_call *call = file->event_call;
Steven Rostedtae63b312012-05-03 23:09:03 -04001955 struct trace_array *tr = file->tr;
Steven Rostedt2e33af02010-04-22 10:35:55 -04001956 struct list_head *head;
Steven Rostedtae63b312012-05-03 23:09:03 -04001957 struct dentry *d_events;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001958 const char *name;
Steven Rostedtfd994982009-02-28 02:41:25 -05001959 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001960
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001961 /*
1962 * If the trace point header did not define TRACE_SYSTEM
1963 * then the system would be called "TRACE_SYSTEM".
1964 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001965 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1966 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1967 if (!d_events)
1968 return -ENOMEM;
1969 } else
1970 d_events = parent;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001971
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04001972 name = trace_event_name(call);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05001973 file->dir = tracefs_create_dir(name, d_events);
Steven Rostedtae63b312012-05-03 23:09:03 -04001974 if (!file->dir) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05001975 pr_warn("Could not create tracefs '%s' directory\n", name);
Steven Rostedt1473e442009-02-24 14:15:08 -05001976 return -1;
1977 }
1978
Steven Rostedt9b637762012-05-10 15:55:43 -04001979 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
Steven Rostedtae63b312012-05-03 23:09:03 -04001980 trace_create_file("enable", 0644, file->dir, file,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001981 &ftrace_enable_fops);
Steven Rostedt1473e442009-02-24 14:15:08 -05001982
Steven Rostedt22392912010-04-21 12:27:06 -04001983#ifdef CONFIG_PERF_EVENTS
Steven Rostedta1d0ce82010-06-08 11:22:06 -04001984 if (call->event.type && call->class->reg)
Oleg Nesterov1a111262013-07-26 19:25:32 +02001985 trace_create_file("id", 0444, file->dir,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001986 (void *)(long)call->event.type,
1987 &ftrace_event_id_fops);
Steven Rostedt22392912010-04-21 12:27:06 -04001988#endif
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001989
Li Zefanc9d932c2010-05-24 16:24:28 +08001990 /*
1991 * Other events may have the same class. Only update
1992 * the fields if they are not already defined.
1993 */
1994 head = trace_get_fields(call);
1995 if (list_empty(head)) {
1996 ret = call->class->define_fields(call);
1997 if (ret < 0) {
Fabian Frederick3448bac2014-06-07 13:43:08 +02001998 pr_warn("Could not initialize trace point events/%s\n",
1999 name);
Steven Rostedtae63b312012-05-03 23:09:03 -04002000 return -1;
Tom Zanussicf027f62009-03-22 03:30:39 -05002001 }
2002 }
Tom Zanussif306cc82013-10-24 08:34:17 -05002003 trace_create_file("filter", 0644, file->dir, file,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002004 &ftrace_event_filter_fops);
Tom Zanussicf027f62009-03-22 03:30:39 -05002005
Chunyu Hu854145e2016-05-03 19:34:34 +08002006 /*
2007 * Only event directories that can be enabled should have
2008 * triggers.
2009 */
2010 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2011 trace_create_file("trigger", 0644, file->dir, file,
2012 &event_trigger_fops);
Tom Zanussi85f2b082013-10-24 08:59:24 -05002013
Tom Zanussi7ef224d2016-03-03 12:54:42 -06002014#ifdef CONFIG_HIST_TRIGGERS
2015 trace_create_file("hist", 0444, file->dir, file,
2016 &event_hist_fops);
2017#endif
Steven Rostedtae63b312012-05-03 23:09:03 -04002018 trace_create_file("format", 0444, file->dir, call,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002019 &ftrace_event_format_fops);
Steven Rostedtfd994982009-02-28 02:41:25 -05002020
Steven Rostedt1473e442009-02-24 14:15:08 -05002021 return 0;
2022}
2023
/*
 * Remove the trace_event_file (and its tracefs directory) for @call from
 * every trace_array. Each trace_array holds at most one file per call,
 * hence the break after the first match within an array.
 */
static void remove_event_from_tracers(struct trace_event_call *call)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}
2043
/*
 * Fully unregister an event: disable it in every trace_array, drop its
 * output-formatting registration, remove its per-array files, and take
 * it off the global ftrace_events list.
 */
static void event_remove(struct trace_event_call *call)
{
	struct trace_array *tr;
	struct trace_event_file *file;

	/* Disable the event everywhere before tearing anything down */
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		ftrace_event_enable_disable(file, 0);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
2067
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002068static int event_init(struct trace_event_call *call)
Ezequiel Garcia87819152012-09-12 11:47:57 -03002069{
2070 int ret = 0;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002071 const char *name;
Ezequiel Garcia87819152012-09-12 11:47:57 -03002072
Steven Rostedt (Red Hat)687fcc42015-05-13 14:20:14 -04002073 name = trace_event_name(call);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04002074 if (WARN_ON(!name))
Ezequiel Garcia87819152012-09-12 11:47:57 -03002075 return -EINVAL;
2076
2077 if (call->class->raw_init) {
2078 ret = call->class->raw_init(call);
2079 if (ret < 0 && ret != -ENOSYS)
Fabian Frederick3448bac2014-06-07 13:43:08 +02002080 pr_warn("Could not initialize trace events/%s\n", name);
Ezequiel Garcia87819152012-09-12 11:47:57 -03002081 }
2082
2083 return ret;
2084}
2085
Li Zefan67ead0a2010-05-24 16:25:13 +08002086static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002087__register_event(struct trace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002088{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002089 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04002090
Ezequiel Garcia87819152012-09-12 11:47:57 -03002091 ret = event_init(call);
2092 if (ret < 0)
2093 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04002094
Steven Rostedtae63b312012-05-03 23:09:03 -04002095 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08002096 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07002097
Steven Rostedtae63b312012-05-03 23:09:03 -04002098 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04002099}
2100
/*
 * Replace, in place, the enum name at @ptr (which is @len bytes long)
 * with the decimal value from @map.  The rest of the string is shifted
 * down to close the gap.  Returns a pointer just past the inserted
 * value, or NULL if the numeric form would be longer than the name
 * (the buffer can only ever shrink, never grow).
 */
static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
{
        int rlen;
        int elen;

        /* Find the length of the enum value as a string */
        elen = snprintf(ptr, 0, "%ld", map->enum_value);
        /* Make sure there's enough room to replace the string with the value */
        if (len < elen)
                return NULL;

        snprintf(ptr, elen + 1, "%ld", map->enum_value);

        /* Get the rest of the string of ptr */
        rlen = strlen(ptr + len);
        memmove(ptr + elen, ptr + len, rlen);
        /* Make sure we end the new string */
        ptr[elen + rlen] = 0;

        return ptr + elen;
}
2122
/*
 * Scan call->print_fmt for occurrences of map->enum_string outside of
 * double-quoted sections and rewrite each one, in place, to its numeric
 * value via enum_replace().  Since the replacement is never longer than
 * the name (enum_replace() fails otherwise), the existing buffer is
 * reused.  Identifiers, numbers and struct member accesses ('.', '->')
 * that do not match the enum name are skipped over.
 */
static void update_event_printk(struct trace_event_call *call,
                                struct trace_enum_map *map)
{
        char *ptr;
        int quote = 0;
        int len = strlen(map->enum_string);

        for (ptr = call->print_fmt; *ptr; ptr++) {
                if (*ptr == '\\') {
                        /* Skip the escaped character, whatever it is */
                        ptr++;
                        /* paranoid */
                        if (!*ptr)
                                break;
                        continue;
                }
                if (*ptr == '"') {
                        quote ^= 1;
                        continue;
                }
                /* Enum names are only replaced outside of quoted strings */
                if (quote)
                        continue;
                if (isdigit(*ptr)) {
                        /* skip numbers */
                        do {
                                ptr++;
                                /* Check for alpha chars like ULL */
                        } while (isalnum(*ptr));
                        if (!*ptr)
                                break;
                        /*
                         * A number must have some kind of delimiter after
                         * it, and we can ignore that too.
                         */
                        continue;
                }
                if (isalpha(*ptr) || *ptr == '_') {
                        /* Match only a full identifier, not a prefix of one */
                        if (strncmp(map->enum_string, ptr, len) == 0 &&
                            !isalnum(ptr[len]) && ptr[len] != '_') {
                                ptr = enum_replace(ptr, map, len);
                                /* Hmm, enum string smaller than value */
                                if (WARN_ON_ONCE(!ptr))
                                        return;
                                /*
                                 * No need to decrement here, as enum_replace()
                                 * returns the pointer to the character passed
                                 * the enum, and two enums can not be placed
                                 * back to back without something in between.
                                 * We can skip that something in between.
                                 */
                                continue;
                        }
                skip_more:
                        do {
                                ptr++;
                        } while (isalnum(*ptr) || *ptr == '_');
                        if (!*ptr)
                                break;
                        /*
                         * If what comes after this variable is a '.' or
                         * '->' then we can continue to ignore that string.
                         */
                        if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
                                ptr += *ptr == '.' ? 1 : 2;
                                if (!*ptr)
                                        break;
                                goto skip_more;
                        }
                        /*
                         * Once again, we can skip the delimiter that came
                         * after the string.
                         */
                        continue;
                }
        }
}
2198
/*
 * Walk every registered event and rewrite its print_fmt, replacing each
 * enum name found in map[0..len) with its numeric value.  Called with
 * the module's (or kernel's) enum map array.
 */
void trace_event_enum_update(struct trace_enum_map **map, int len)
{
        struct trace_event_call *call, *p;
        const char *last_system = NULL;
        bool first = false;
        int last_i;
        int i;

        down_write(&trace_event_sem);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                /* events are usually grouped together with systems */
                if (!last_system || call->class->system != last_system) {
                        first = true;
                        last_i = 0;
                        last_system = call->class->system;
                }

                /*
                 * Since calls are grouped by systems, the likelihood that the
                 * next call in the iteration belongs to the same system as the
                 * previous call is high. As an optimization, we skip searching
                 * for a map[] that matches the call's system if the last call
                 * was from the same system. That's what last_i is for. If the
                 * call has the same system as the previous call, then last_i
                 * will be the index of the first map[] that has a matching
                 * system.
                 */
                for (i = last_i; i < len; i++) {
                        if (call->class->system == map[i]->system) {
                                /* Save the first system if need be */
                                if (first) {
                                        last_i = i;
                                        first = false;
                                }
                                update_event_printk(call, map[i]);
                        }
                }
        }
        up_write(&trace_event_sem);
}
2239
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002240static struct trace_event_file *
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002241trace_create_new_event(struct trace_event_call *call,
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04002242 struct trace_array *tr)
2243{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002244 struct trace_event_file *file;
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04002245
2246 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2247 if (!file)
2248 return NULL;
2249
2250 file->event_call = call;
2251 file->tr = tr;
2252 atomic_set(&file->sm_ref, 0);
Tom Zanussi85f2b082013-10-24 08:59:24 -05002253 atomic_set(&file->tm_ref, 0);
2254 INIT_LIST_HEAD(&file->triggers);
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04002255 list_add(&file->list, &tr->events);
2256
2257 return file;
2258}
2259
Steven Rostedtae63b312012-05-03 23:09:03 -04002260/* Add an event to a trace directory */
2261static int
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002262__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04002263{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002264 struct trace_event_file *file;
Steven Rostedtae63b312012-05-03 23:09:03 -04002265
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04002266 file = trace_create_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04002267 if (!file)
2268 return -ENOMEM;
2269
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002270 return event_create_dir(tr->event_dir, file);
Steven Rostedtae63b312012-05-03 23:09:03 -04002271}
2272
/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 */
static __init int
__trace_early_add_new_event(struct trace_event_call *call,
                            struct trace_array *tr)
{
        struct trace_event_file *file;

        file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;

        /* Directory creation is deferred until tracefs is up */
        return 0;
}
2290
Steven Rostedtae63b312012-05-03 23:09:03 -04002291struct ftrace_module_file_ops;
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002292static void __add_event_to_tracers(struct trace_event_call *call);
Steven Rostedtae63b312012-05-03 23:09:03 -04002293
/* Add an additional event_call dynamically */
int trace_add_event_call(struct trace_event_call *call)
{
        int ret;
        /* Lock order: trace_types_lock before event_mutex */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);

        ret = __register_event(call, NULL);
        if (ret >= 0)
                /* Expose the new event in every trace instance */
                __add_event_to_tracers(call);

        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
        return ret;
}
Steven Rostedt701970b2009-04-24 23:11:22 -04002309
/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct trace_event_call *call)
{
        /* Unregister and tear down the event's files/fields/filter */
        event_remove(call);
        trace_destroy_fields(call);
        free_event_filter(call->filter);
        call->filter = NULL;
}
2321
/*
 * Remove @call if it is safe to do so.  Returns -EBUSY when the event
 * is still referenced by perf or is enabled in any trace instance;
 * otherwise tears the event down and returns 0.
 */
static int probe_remove_event_call(struct trace_event_call *call)
{
        struct trace_array *tr;
        struct trace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
        if (call->perf_refcount)
                return -EBUSY;
#endif
        do_for_each_event_file(tr, file) {
                if (file->event_call != call)
                        continue;
                /*
                 * We can't rely on ftrace_event_enable_disable(enable => 0)
                 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress
                 * TRACE_REG_UNREGISTER.
                 */
                if (file->flags & EVENT_FILE_FL_ENABLED)
                        return -EBUSY;
                /*
                 * The do_for_each_event_file_safe() is
                 * a double loop. After finding the call for this
                 * trace_array, we use break to jump to the next
                 * trace_array.
                 */
                break;
        } while_for_each_event_file();

        __trace_remove_event_call(call);

        return 0;
}
2354
/* Remove an event_call */
int trace_remove_event_call(struct trace_event_call *call)
{
        int ret;

        /* Lock order: trace_types_lock, event_mutex, then trace_event_sem */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        down_write(&trace_event_sem);
        ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        return ret;
}
2370
/*
 * Iterate @event over a contiguous array of trace_event_call pointers,
 * from @start up to (but not including) @end.
 */
#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)
2375
2376#ifdef CONFIG_MODULES
2377
/*
 * Register all trace events exported by @mod and add them to every
 * trace instance.  Modules with a bad taint are skipped entirely.
 */
static void trace_module_add_events(struct module *mod)
{
        struct trace_event_call **call, **start, **end;

        if (!mod->num_trace_events)
                return;

        /* Don't add infrastructure for mods without tracepoints */
        if (trace_module_has_bad_taint(mod)) {
                pr_err("%s: module has bad taint, not creating trace events\n",
                       mod->name);
                return;
        }

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        for_each_event(call, start, end) {
                __register_event(*call, mod);
                __add_event_to_tracers(*call);
        }
}
2400
/*
 * Tear down every event owned by @mod.  If any of them was ever
 * enabled, all online ring buffers are reset to avoid stale event IDs
 * being mis-decoded later.
 */
static void trace_module_remove_events(struct module *mod)
{
        struct trace_event_call *call, *p;
        bool clear_trace = false;

        down_write(&trace_event_sem);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
                                clear_trace = true;
                        __trace_remove_event_call(call);
                }
        }
        up_write(&trace_event_sem);

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events that were used. The only worry is if
         * a new module gets loaded, and takes on the same id as the events
         * of this module. When printing out the buffer, traced events left
         * over from this module may be passed to the new module events and
         * unexpected results may occur.
         */
        if (clear_trace)
                tracing_reset_all_online_cpus();
}
2427
/*
 * Module notifier callback: register a module's events when it comes
 * up, and remove them when it goes away.  Always returns 0 so module
 * load/unload is never blocked from here.
 */
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        return 0;
}
Steven Rostedt (Red Hat)315326c2013-03-02 17:37:14 -05002448
/* Hook into module load/unload notifications for event (un)registration */
static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 1, /* higher than trace.c module notify */
};
Steven Rostedt61f919a2009-04-14 18:22:32 -04002453#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04002454
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
        struct trace_event_call *call;
        int ret;

        /* Failure to create one event's directory is only warned about */
        list_for_each_entry(call, &ftrace_events, list) {
                ret = __trace_add_new_event(call, tr);
                if (ret < 0)
                        pr_warn("Could not create directory for event %s\n",
                                trace_event_name(call));
        }
}
2469
/*
 * Look up the trace_event_file in instance @tr matching the given
 * @system and @event names.  Events without a name, class or reg
 * method, and events flagged IGNORE_ENABLE, are skipped.
 * Returns NULL if no match is found.
 */
struct trace_event_file *
find_event_file(struct trace_array *tr, const char *system, const char *event)
{
        struct trace_event_file *file;
        struct trace_event_call *call;
        const char *name;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = trace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (strcmp(event, name) == 0 &&
                    strcmp(system, call->class->system) == 0)
                        return file;
        }
        return NULL;
}
2494
Steven Rostedt (Red Hat)2875a082013-12-20 23:23:05 -05002495#ifdef CONFIG_DYNAMIC_FTRACE
2496
/* Avoid typos */
#define ENABLE_EVENT_STR "enable_event"
#define DISABLE_EVENT_STR "disable_event"

/*
 * State shared by the enable/disable function probes:
 * @file:   the event file being toggled
 * @count:  remaining number of hits (-1 == unlimited)
 * @ref:    number of function ips this probe is attached to
 * @enable: true to enable the event on hit, false to disable
 */
struct event_probe_data {
        struct trace_event_file *file;
        unsigned long count;
        int ref;
        bool enable;
};
2507
/*
 * Function-probe callback (no count limit): each time the traced
 * function is hit, flip the event's SOFT_DISABLED bit to enable or
 * disable it, depending on data->enable.
 */
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (!data)
                return;

        if (data->enable)
                clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
        else
                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
}
2522
/*
 * Counted variant of event_enable_probe(): only acts while data->count
 * is non-zero, decrementing it on each effective toggle (-1 means
 * unlimited).  Hits that would leave the event in its current state do
 * not consume the count.
 */
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (!data)
                return;

        if (!data->count)
                return;

        /* Skip if the event is in a state we want to switch to */
        if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
                return;

        if (data->count != -1)
                (data->count)--;

        event_enable_probe(ip, parent_ip, _data);
}
2544
/*
 * seq_file output for listing a registered enable/disable probe:
 * "<func>:<cmd>:<system>:<event>[:count=N|:unlimited]".
 */
static int
event_enable_print(struct seq_file *m, unsigned long ip,
                   struct ftrace_probe_ops *ops, void *_data)
{
        struct event_probe_data *data = _data;

        seq_printf(m, "%ps:", (void *)ip);

        seq_printf(m, "%s:%s:%s",
                   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
                   data->file->event_call->class->system,
                   trace_event_name(data->file->event_call));

        if (data->count == -1)
                seq_puts(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", data->count);

        return 0;
}
2565
/*
 * Called when the probe is attached to a function ip: take a reference
 * on the shared event_probe_data.  Never fails.
 */
static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        data->ref++;
        return 0;
}
2576
/*
 * Called when the probe is detached from a function ip: drop one
 * reference; on the last one, clear the event's soft mode, release the
 * module pin taken at registration, and free the shared data.
 */
static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
                  void **_data)
{
        struct event_probe_data **pdata = (struct event_probe_data **)_data;
        struct event_probe_data *data = *pdata;

        if (WARN_ON_ONCE(data->ref <= 0))
                return;

        data->ref--;
        if (!data->ref) {
                /* Remove the SOFT_MODE flag */
                __ftrace_event_enable_disable(data->file, 0, 1);
                module_put(data->file->event_call->mod);
                kfree(data);
        }
        *pdata = NULL;
}
2596
/* enable_event with no count limit */
static struct ftrace_probe_ops event_enable_probe_ops = {
        .func = event_enable_probe,
        .print = event_enable_print,
        .init = event_enable_init,
        .free = event_enable_free,
};

/* enable_event with a hit count */
static struct ftrace_probe_ops event_enable_count_probe_ops = {
        .func = event_enable_count_probe,
        .print = event_enable_print,
        .init = event_enable_init,
        .free = event_enable_free,
};

/* disable_event with no count limit (direction chosen via data->enable) */
static struct ftrace_probe_ops event_disable_probe_ops = {
        .func = event_enable_probe,
        .print = event_enable_print,
        .init = event_enable_init,
        .free = event_enable_free,
};

/* disable_event with a hit count */
static struct ftrace_probe_ops event_disable_count_probe_ops = {
        .func = event_enable_count_probe,
        .print = event_enable_print,
        .init = event_enable_init,
        .free = event_enable_free,
};
2624
/*
 * Parse and register an "enable_event"/"disable_event" function-probe
 * command of the form <system>:<event>[:<count>].  A leading '!' in
 * @glob unregisters the probe instead.  While the probe is registered,
 * the owning module is pinned and the event is put into soft mode.
 * Returns 0 on success or a negative error; cleanup on failure follows
 * the out_disable/out_put/out_free goto chain in reverse order of
 * acquisition.
 */
static int
event_enable_func(struct ftrace_hash *hash,
                  char *glob, char *cmd, char *param, int enabled)
{
        struct trace_array *tr = top_trace_array();
        struct trace_event_file *file;
        struct ftrace_probe_ops *ops;
        struct event_probe_data *data;
        const char *system;
        const char *event;
        char *number;
        bool enable;
        int ret;

        if (!tr)
                return -ENODEV;

        /* hash funcs only work with set_ftrace_filter */
        if (!enabled || !param)
                return -EINVAL;

        system = strsep(&param, ":");
        if (!param)
                return -EINVAL;

        event = strsep(&param, ":");

        mutex_lock(&event_mutex);

        ret = -EINVAL;
        file = find_event_file(tr, system, event);
        if (!file)
                goto out;

        enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

        /* Counted ops are used only when a count parameter remains */
        if (enable)
                ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
        else
                ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

        if (glob[0] == '!') {
                unregister_ftrace_function_probe_func(glob+1, ops);
                ret = 0;
                goto out;
        }

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out;

        data->enable = enable;
        data->count = -1;
        data->file = file;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        ret = -EINVAL;
        if (!strlen(number))
                goto out_free;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, &data->count);
        if (ret)
                goto out_free;

 out_reg:
        /* Don't let event modules unload while probe registered */
        ret = try_module_get(file->event_call->mod);
        if (!ret) {
                ret = -EBUSY;
                goto out_free;
        }

        ret = __ftrace_event_enable_disable(file, 1, 1);
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
        /*
         * The above returns on success the # of functions enabled,
         * but if it didn't find any functions it returns zero.
         * Consider no functions a failure too.
         */
        if (!ret) {
                ret = -ENOENT;
                goto out_disable;
        } else if (ret < 0)
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
 out:
        mutex_unlock(&event_mutex);
        return ret;

 out_disable:
        __ftrace_event_enable_disable(file, 0, 1);
 out_put:
        module_put(file->event_call->mod);
 out_free:
        kfree(data);
        goto out;
}
2734
/* "enable_event" command for set_ftrace_filter */
static struct ftrace_func_command event_enable_cmd = {
        .name = ENABLE_EVENT_STR,
        .func = event_enable_func,
};

/* "disable_event" command; same handler, dispatched on cmd name */
static struct ftrace_func_command event_disable_cmd = {
        .name = DISABLE_EVENT_STR,
        .func = event_enable_func,
};
2744
/*
 * Register both ftrace filter commands at boot.  If the second
 * registration fails, the first is rolled back so the pair stays
 * consistent.
 */
static __init int register_event_cmds(void)
{
        int ret;

        ret = register_ftrace_command(&event_enable_cmd);
        if (WARN_ON(ret < 0))
                return ret;
        ret = register_ftrace_command(&event_disable_cmd);
        if (WARN_ON(ret < 0))
                unregister_ftrace_command(&event_enable_cmd);
        return ret;
}
#else
/* No function probes without dynamic ftrace */
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
2760
/*
 * The top level array has already had its trace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the tracefs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
        struct trace_event_file *file;
        int ret;


        /* A failed directory only warns; remaining events still get theirs */
        list_for_each_entry(file, &tr->events, list) {
                ret = event_create_dir(tr->event_dir, file);
                if (ret < 0)
                        pr_warn("Could not create directory for event %s\n",
                                trace_event_name(file->event_call));
        }
}
2782
/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
        struct trace_event_call *call;
        int ret;

        list_for_each_entry(call, &ftrace_events, list) {
                /* Early boot up should not have any modules loaded */
                if (WARN_ON_ONCE(call->mod))
                        continue;

                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
                        pr_warn("Could not create early event %s\n",
                                trace_event_name(call));
        }
}
2806
Steven Rostedt0c8916c2012-08-07 16:14:16 -04002807/* Remove the event directory structure for a trace directory. */
2808static void
2809__trace_remove_event_dirs(struct trace_array *tr)
2810{
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002811 struct trace_event_file *file, *next;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04002812
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +02002813 list_for_each_entry_safe(file, next, &tr->events, list)
2814 remove_event_file_dir(file);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04002815}
2816
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002817static void __add_event_to_tracers(struct trace_event_call *call)
Steven Rostedtae63b312012-05-03 23:09:03 -04002818{
2819 struct trace_array *tr;
2820
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002821 list_for_each_entry(tr, &ftrace_trace_arrays, list)
2822 __trace_add_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04002823}
2824
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002825extern struct trace_event_call *__start_ftrace_events[];
2826extern struct trace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04002827
Li Zefan020e5f82009-07-01 10:47:05 +08002828static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2829
/*
 * Handle the "trace_event=" boot parameter: stash the event list for
 * later enabling, force the ring buffer to full size, and skip the
 * boot-time self tests (tracing is in use).
 */
static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = true;
        tracing_selftest_disabled = true;

        return 1;
}
__setup("trace_event=", setup_trace_event);
2839
Steven Rostedt77248222013-02-27 16:28:06 -05002840/* Expects to have event_mutex held when called */
2841static int
2842create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04002843{
2844 struct dentry *d_events;
2845 struct dentry *entry;
2846
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05002847 entry = tracefs_create_file("set_event", 0644, parent,
Steven Rostedtae63b312012-05-03 23:09:03 -04002848 tr, &ftrace_set_event_fops);
2849 if (!entry) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05002850 pr_warn("Could not create tracefs 'set_event' entry\n");
Steven Rostedtae63b312012-05-03 23:09:03 -04002851 return -ENOMEM;
2852 }
2853
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05002854 d_events = tracefs_create_dir("events", parent);
Steven Rostedt277ba042012-08-03 16:10:49 -04002855 if (!d_events) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05002856 pr_warn("Could not create tracefs 'events' directory\n");
Steven Rostedt277ba042012-08-03 16:10:49 -04002857 return -ENOMEM;
2858 }
Steven Rostedtae63b312012-05-03 23:09:03 -04002859
Steven Rostedt (Red Hat)49090102015-09-24 11:33:26 -04002860 entry = tracefs_create_file("set_event_pid", 0644, parent,
2861 tr, &ftrace_set_event_pid_fops);
2862
Steven Rostedtae63b312012-05-03 23:09:03 -04002863 /* ring buffer internal formats */
2864 trace_create_file("header_page", 0444, d_events,
2865 ring_buffer_print_page_header,
2866 &ftrace_show_header_fops);
2867
2868 trace_create_file("header_event", 0444, d_events,
2869 ring_buffer_print_entry_header,
2870 &ftrace_show_header_fops);
2871
2872 trace_create_file("enable", 0644, d_events,
2873 tr, &ftrace_tr_enable_fops);
2874
2875 tr->event_dir = d_events;
Steven Rostedt77248222013-02-27 16:28:06 -05002876
2877 return 0;
2878}
2879
/**
 * event_trace_add_tracer - add a instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
        int ret;

        mutex_lock(&event_mutex);

        ret = create_event_toplevel_files(parent, tr);
        if (ret)
                goto out_unlock;

        /* trace_event_sem protects the per-instance event file lists */
        down_write(&trace_event_sem);
        __trace_add_event_dirs(tr);
        up_write(&trace_event_sem);

 out_unlock:
        mutex_unlock(&event_mutex);

        return ret;
}
2910
2911/*
2912 * The top trace array already had its file descriptors created.
2913 * Now the files themselves need to be created.
2914 */
2915static __init int
2916early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2917{
2918 int ret;
2919
2920 mutex_lock(&event_mutex);
2921
2922 ret = create_event_toplevel_files(parent, tr);
2923 if (ret)
2924 goto out_unlock;
2925
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002926 down_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002927 __trace_early_add_event_dirs(tr);
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002928 up_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002929
2930 out_unlock:
2931 mutex_unlock(&event_mutex);
2932
2933 return ret;
Steven Rostedtae63b312012-05-03 23:09:03 -04002934}
2935
/*
 * Tear down all event state of a trace instance. The steps below are
 * order-sensitive: triggers and pid filters are cleared, then events
 * are disabled, and only after an RCU-sched grace period are the
 * directories actually removed.
 */
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/*
	 * Access to events are within rcu_read_lock_sched();
	 * wait for all current readers before freeing the files.
	 */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	tracefs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}
2963
/*
 * Allocate the slab caches used for event field and event file objects.
 * SLAB_PANIC means boot panics if either cache cannot be created, so no
 * error check is needed here.
 */
static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
	return 0;
}
2970
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002971static __init void
2972early_enable_events(struct trace_array *tr, bool disable_first)
2973{
2974 char *buf = bootup_event_buf;
2975 char *token;
2976 int ret;
2977
2978 while (true) {
2979 token = strsep(&buf, ",");
2980
2981 if (!token)
2982 break;
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002983
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05002984 if (*token) {
2985 /* Restarting syscalls requires that we stop them first */
2986 if (disable_first)
2987 ftrace_set_clr_event(tr, token, 0);
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002988
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05002989 ret = ftrace_set_clr_event(tr, token, 1);
2990 if (ret)
2991 pr_warn("Failed to enable trace event: %s\n", token);
2992 }
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05002993
2994 /* Put back the comma to allow this to be called again */
2995 if (buf)
2996 *(buf - 1) = ',';
2997 }
2998}
2999
/*
 * Register all built-in trace events (those placed in the linker section
 * between __start_ftrace_events and __stop_ftrace_events), set up the top
 * trace array's event files, and process any "trace_event=" boot options.
 */
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		/* Only events that initialized cleanly are registered */
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	/* First pass over the boot command line events; no disabling needed */
	early_enable_events(tr, false);

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}
3035
Steven Rostedt (Red Hat)ce1039b2015-01-14 12:53:45 -05003036/*
3037 * event_trace_enable() is called from trace_event_init() first to
3038 * initialize events and perhaps start any events that are on the
3039 * command line. Unfortunately, there are some events that will not
3040 * start this early, like the system call tracepoints that need
3041 * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3042 * is called before pid 1 starts, and this flag is never set, making
3043 * the syscall tracepoint never get reached, but the event is enabled
3044 * regardless (and not doing anything).
3045 */
3046static __init int event_trace_enable_again(void)
3047{
3048 struct trace_array *tr;
3049
3050 tr = top_trace_array();
3051 if (!tr)
3052 return -ENODEV;
3053
3054 early_enable_events(tr, true);
3055
3056 return 0;
3057}
3058
3059early_initcall(event_trace_enable_again);
3060
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003061static __init int event_trace_init(void)
3062{
Steven Rostedtae63b312012-05-03 23:09:03 -04003063 struct trace_array *tr;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003064 struct dentry *d_tracer;
3065 struct dentry *entry;
Steven Rostedt6d723732009-04-10 14:53:50 -04003066 int ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003067
Steven Rostedtae63b312012-05-03 23:09:03 -04003068 tr = top_trace_array();
Yoshihiro YUNOMAEdc81e5e2014-06-06 07:35:17 +09003069 if (!tr)
3070 return -ENODEV;
Steven Rostedtae63b312012-05-03 23:09:03 -04003071
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003072 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05003073 if (IS_ERR(d_tracer))
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003074 return 0;
3075
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05003076 entry = tracefs_create_file("available_events", 0444, d_tracer,
Steven Rostedtae63b312012-05-03 23:09:03 -04003077 tr, &ftrace_avail_fops);
Steven Rostedt2314c4a2009-03-10 12:04:02 -04003078 if (!entry)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05003079 pr_warn("Could not create tracefs 'available_events' entry\n");
Steven Rostedt2314c4a2009-03-10 12:04:02 -04003080
Daniel Wagner9f616682015-08-10 14:35:46 +02003081 if (trace_define_generic_fields())
3082 pr_warn("tracing: Failed to allocated generic fields");
3083
Li Zefan8728fe52010-05-24 16:22:49 +08003084 if (trace_define_common_fields())
Fabian Frederick3448bac2014-06-07 13:43:08 +02003085 pr_warn("tracing: Failed to allocate common fields");
Li Zefan8728fe52010-05-24 16:22:49 +08003086
Steven Rostedt77248222013-02-27 16:28:06 -05003087 ret = early_event_add_tracer(d_tracer, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04003088 if (ret)
3089 return ret;
Li Zefan020e5f82009-07-01 10:47:05 +08003090
Oleg Nesterov836d4812013-07-31 19:31:37 +02003091#ifdef CONFIG_MODULES
Steven Rostedt6d723732009-04-10 14:53:50 -04003092 ret = register_module_notifier(&trace_module_nb);
Ming Lei55379372009-05-18 23:04:46 +08003093 if (ret)
Fabian Frederick3448bac2014-06-07 13:43:08 +02003094 pr_warn("Failed to register trace events module notifier\n");
Oleg Nesterov836d4812013-07-31 19:31:37 +02003095#endif
Steven Rostedtb77e38a2009-02-24 10:21:36 -05003096 return 0;
3097}
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05003098
/*
 * Early boot entry point for the event system: set up the slab caches,
 * initialize syscall event support, then register/enable built-in events.
 * Ordering matters: caches must exist before events are initialized.
 */
void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
}

fs_initcall(event_trace_init);
Steven Rostedte6187002009-04-15 13:36:40 -04003107
3108#ifdef CONFIG_FTRACE_STARTUP_TEST
3109
/* Locks exercised by test_work() so lock-related tracepoints can fire */
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
3113
/*
 * Work item run on each CPU during the self test. Takes a plain spinlock,
 * an irq-disabling spinlock, and a mutex (with a sleep) to give various
 * lock and scheduler tracepoints a chance to trigger.
 */
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
3126
/*
 * Kthread body for the self test: performs an allocation, schedules
 * test_work() on every CPU, then sleeps until kthread_stop() is called.
 * Allocation failure is only logged; the test continues regardless.
 */
static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	/*
	 * Standard kthread stop protocol: set state before checking
	 * kthread_should_stop() to avoid missing a wakeup.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
3148
3149/*
3150 * Do various things that may trigger events.
3151 */
3152static __init void event_test_stuff(void)
3153{
3154 struct task_struct *test_thread;
3155
3156 test_thread = kthread_run(event_test_thread, NULL, "test-events");
3157 msleep(1);
3158 kthread_stop(test_thread);
3159}
3160
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	/* Pass 1: enable and exercise each event individually */
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	/* Pass 2: enable and exercise each subsystem as a group */
	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	/* Pass 3: everything on at once */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
3274
3275#ifdef CONFIG_FUNCTION_TRACER
3276
/* Per-cpu recursion guard for function_test_events_call() */
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

/* Stand-in event file; its ->tr is filled in before the function test runs */
static struct trace_event_file event_trace_file __initdata;
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04003280
/*
 * ftrace callback invoked on every traced function while the self test
 * runs with the function tracer attached. Writes a TRACE_FN entry for
 * the call into the top trace array's ring buffer.
 */
static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	/* Record preempt count before disabling preemption below */
	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	/* Per-cpu recursion guard: bail if we re-entered on this CPU */
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, flags, pc);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
3318
/* ftrace ops used to hook every function during the combined self test */
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
3324
3325static __init void event_trace_self_test_with_function(void)
3326{
Steven Rostedt17bb6152011-05-23 15:27:46 -04003327 int ret;
Steven Rostedt (Red Hat)9b9db272016-04-29 18:10:21 -04003328
3329 event_trace_file.tr = top_trace_array();
3330 if (WARN_ON(!event_trace_file.tr))
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003331 return;
Steven Rostedt (Red Hat)9b9db272016-04-29 18:10:21 -04003332
Steven Rostedt17bb6152011-05-23 15:27:46 -04003333 ret = register_ftrace_function(&trace_ops);
3334 if (WARN_ON(ret < 0)) {
3335 pr_info("Failed to enable function tracer for event tests\n");
3336 return;
3337 }
Steven Rostedt9ea21c12009-04-16 12:15:44 -04003338 pr_info("Running tests again, along with the function tracer\n");
3339 event_trace_self_tests();
3340 unregister_ftrace_function(&trace_ops);
3341}
#else
/* Function tracer not configured: no combined function/event test to run */
static __init void event_trace_self_test_with_function(void)
{
}
#endif
3347
3348static __init int event_trace_self_tests_init(void)
3349{
Li Zefan020e5f82009-07-01 10:47:05 +08003350 if (!tracing_selftest_disabled) {
3351 event_trace_self_tests();
3352 event_trace_self_test_with_function();
3353 }
Steven Rostedte6187002009-04-15 13:36:40 -04003354
3355 return 0;
3356}
3357
Steven Rostedt28d20e22009-04-20 12:12:44 -04003358late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04003359
3360#endif