/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest walks the ring buffer to count the entries it
 * inserted, but concurrent insertions into the ring buffer,
 * such as trace_printk(), could occur at the same time and
 * give false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and is set back to zero only if the
 * initialization of the tracer succeeds; that is the only place
 * that ever clears it.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * than "mod" or "enum_string"
         */
        union trace_enum_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
        struct trace_enum_map           map;
        struct trace_enum_map_head      head;
        struct trace_enum_map_tail      tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
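
/*
 * Usage example (illustrative): either form below on the kernel
 * command line is accepted by the parser above:
 *
 *      ftrace_dump_on_oops             -> DUMP_ALL (dump every CPU's buffer)
 *      ftrace_dump_on_oops=orig_cpu    -> DUMP_ORIG (only the oops'ing CPU)
 */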

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
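
/*
 * Rounding example: ns2usecs() rounds to the nearest microsecond,
 * e.g. ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */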

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of those pages hold the list itself: the
 * lru item in each page descriptor links together the pages
 * of that CPU's buffer.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
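
/*
 * Usage sketch (illustrative, not part of this file): a caller that
 * must keep a trace_array alive while working outside trace_types_lock
 * pairs the two calls; do_work_on() is a hypothetical helper.
 *
 *      if (trace_array_get(tr) < 0)
 *              return -ENODEV;         // tr is gone from the list
 *      do_work_on(tr);
 *      trace_array_put(tr);
 */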

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks whether @task should be traced according to @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        return !trace_find_filtered_pid(filtered_pids, task->pid);
}
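
/*
 * Usage sketch (illustrative): an event hook deciding whether to record
 * the current task; record_the_event() is hypothetical, and pid_list
 * would come from the owning trace_array.
 *
 *      if (trace_ignore_this_task(pid_list, current))
 *              return;                 // task is filtered out
 *      record_the_event();
 */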

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}
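
/*
 * Usage sketch (illustrative): hooked into fork/exit events so the
 * filter follows the task tree; the hook points here are hypothetical.
 *
 *      // on fork: the child inherits membership if the parent is listed
 *      trace_filter_add_remove_task(pid_list, parent, child);
 *      // on exit: drop the task from the list
 *      trace_filter_add_remove_task(pid_list, NULL, task);
 */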

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 of the actual pid, so zero can be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
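
/*
 * Wiring sketch (illustrative): how the three helpers above slot into a
 * seq_file. The p_start/p_next wrappers and pid_list_of() are
 * hypothetical; a real user must also hand the pid_list to start/next,
 * typically via m->private under RCU, and unlock in p_stop.
 *
 *      static void *p_start(struct seq_file *m, loff_t *pos)
 *      {
 *              return trace_pid_start(pid_list_of(m), pos);
 *      }
 *      static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return trace_pid_next(pid_list_of(m), v, pos);
 *      }
 *      static const struct seq_operations show_pid_seq_ops = {
 *              .start  = p_start,
 *              .next   = p_next,
 *              .stop   = p_stop,       // hypothetical cleanup
 *              .show   = trace_pid_show,
 *      };
 */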

/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list) {
                /* Don't leak the parser buffer on the early error path */
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
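
/*
 * Usage sketch (illustrative): a file write handler swapping in the
 * list built by trace_pid_write(); the surrounding locking and the
 * tr->filtered_pids publication details are elided assumptions.
 *
 *      ret = trace_pid_write(old_list, &new_list, ubuf, cnt);
 *      if (ret < 0)
 *              return ret;
 *      rcu_assign_pointer(tr->filtered_pids, new_list);
 *      if (old_list) {
 *              synchronize_sched();    // wait for readers of old_list
 *              trace_free_pid_list(old_list);
 *      }
 */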

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low 16384: if a dump on oops
 * happens, we do not want to wait for a huge amount of output.
 * It is configurable at both boot time and run time anyway.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we let other processes
 * consume these events concurrently:
 * A) the page holding the consumed events may become a normal page
 *    (not a reader page) in the ring buffer, and this page will be
 *    rewritten by the event producer.
 * B) the page holding the consumed events may become a page for
 *    splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
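
/*
 * Usage sketch (illustrative): a consuming reader brackets its buffer
 * access with the primitives above; read_events_from() is hypothetical.
 *
 *      trace_access_lock(cpu);         // or RING_BUFFER_ALL_CPUS
 *      read_events_from(cpu);
 *      trace_access_unlock(cpu);
 */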

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:   The address of the caller
 * @str:  The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
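
/*
 * Usage note (illustrative): callers normally reach __trace_puts()
 * through the trace_puts() macro, which supplies the caller address
 * and the string length, e.g.:
 *
 *      trace_puts("reached the hot path\n");
 */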

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:  The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) that
         * we want to preserve.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
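
/*
 * Usage sketch (illustrative): allocate the spare buffer once from a
 * context that may sleep, then snapshot from one that cannot; the
 * trigger condition here is hypothetical.
 *
 *      tracing_alloc_snapshot();       // e.g. at module init
 *      ...
 *      if (saw_the_bug)                // e.g. in an interrupt handler
 *              tracing_snapshot();
 */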
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        ARCH_TRACE_CLOCKS
};
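
/*
 * Usage note (illustrative): the names above are what user space
 * selects through the tracefs "trace_clock" file, e.g. from a shell:
 *
 *      echo mono > /sys/kernel/debug/tracing/trace_clock
 */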

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
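
/*
 * Usage sketch (illustrative): a write handler tokenizing user input
 * with the parser; this mirrors the loop in trace_pid_write() above,
 * and handle_token() is hypothetical.
 *
 *      while (cnt > 0) {
 *              pos = 0;
 *              ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *              if (ret < 0 || !trace_parser_loaded(&parser))
 *                      break;
 *              read += ret; ubuf += ret; cnt -= ret;
 *              parser.buffer[parser.idx] = 0;
 *              handle_token(parser.buffer);
 *              trace_parser_clear(&parser);
 *      }
 */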

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

Rabin Vincente30f53a2014-11-10 19:46:34 +01001379static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001380{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001381	/* Iterators are static, they should be either filled or empty */
1382 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001383 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001384
Rabin Vincente30f53a2014-11-10 19:46:34 +01001385 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1386 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001387}
1388
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001389#ifdef CONFIG_FTRACE_STARTUP_TEST
1390static int run_tracer_selftest(struct tracer *type)
1391{
1392 struct trace_array *tr = &global_trace;
1393 struct tracer *saved_tracer = tr->current_trace;
1394 int ret;
1395
1396 if (!type->selftest || tracing_selftest_disabled)
1397 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001398
1399 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001400 * Run a selftest on this tracer.
1401 * Here we reset the trace buffer, and set the current
1402 * tracer to be this tracer. The tracer can then run some
1403 * internal tracing to verify that everything is in order.
1404 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001405 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001406 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001407
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001408 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001409
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001410#ifdef CONFIG_TRACER_MAX_TRACE
1411 if (type->use_max_tr) {
1412 /* If we expanded the buffers, make sure the max is expanded too */
1413 if (ring_buffer_expanded)
1414 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1415 RING_BUFFER_ALL_CPUS);
1416 tr->allocated_snapshot = true;
1417 }
1418#endif
1419
1420 /* the test is responsible for initializing and enabling */
1421 pr_info("Testing tracer %s: ", type->name);
1422 ret = type->selftest(type, tr);
1423 /* the test is responsible for resetting too */
1424 tr->current_trace = saved_tracer;
1425 if (ret) {
1426 printk(KERN_CONT "FAILED!\n");
1427 /* Add the warning after printing 'FAILED' */
1428 WARN_ON(1);
1429 return -1;
1430 }
1431 /* Only reset on passing, to avoid touching corrupted buffers */
1432 tracing_reset_online_cpus(&tr->trace_buffer);
1433
1434#ifdef CONFIG_TRACER_MAX_TRACE
1435 if (type->use_max_tr) {
1436 tr->allocated_snapshot = false;
1437
1438 /* Shrink the max buffer again */
1439 if (ring_buffer_expanded)
1440 ring_buffer_resize(tr->max_buffer.buffer, 1,
1441 RING_BUFFER_ALL_CPUS);
1442 }
1443#endif
1444
1445 printk(KERN_CONT "PASSED\n");
1446 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001447}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001448#else
1449static inline int run_tracer_selftest(struct tracer *type)
1450{
1451 return 0;
1452}
1453#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001454
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001455static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1456
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001457static void __init apply_trace_boot_options(void);
1458
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001459/**
1460 * register_tracer - register a tracer with the ftrace system.
1461 * @type - the plugin for the tracer
1462 *
1463 * Register a new plugin tracer.
1464 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001465int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001466{
1467 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001468 int ret = 0;
1469
1470 if (!type->name) {
1471 pr_info("Tracer must have a name\n");
1472 return -1;
1473 }
1474
Dan Carpenter24a461d2010-07-10 12:06:44 +02001475 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001476 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1477 return -1;
1478 }
1479
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001481
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001482 tracing_selftest_running = true;
1483
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484 for (t = trace_types; t; t = t->next) {
1485 if (strcmp(type->name, t->name) == 0) {
1486 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001487 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001488 type->name);
1489 ret = -1;
1490 goto out;
1491 }
1492 }
1493
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001494 if (!type->set_flag)
1495 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001496 if (!type->flags) {
 1497		/* allocate a dummy tracer_flags */
1498 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001499 if (!type->flags) {
1500 ret = -ENOMEM;
1501 goto out;
1502 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001503 type->flags->val = 0;
1504 type->flags->opts = dummy_tracer_opt;
1505 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001506 if (!type->flags->opts)
1507 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001508
Chunyu Hud39cdd22016-03-08 21:37:01 +08001509 /* store the tracer for __set_tracer_option */
1510 type->flags->trace = type;
1511
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001512 ret = run_tracer_selftest(type);
1513 if (ret < 0)
1514 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001515
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001516 type->next = trace_types;
1517 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001518 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001519
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001521 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522 mutex_unlock(&trace_types_lock);
1523
Steven Rostedtdac74942009-02-05 01:13:38 -05001524 if (ret || !default_bootup_tracer)
1525 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001526
Li Zefanee6c2c12009-09-18 14:06:47 +08001527 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001528 goto out_unlock;
1529
1530 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1531 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001532 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001533 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001534
1535 apply_trace_boot_options();
1536
Steven Rostedtdac74942009-02-05 01:13:38 -05001537 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001538 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001539#ifdef CONFIG_FTRACE_STARTUP_TEST
1540 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1541 type->name);
1542#endif
1543
1544 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001545 return ret;
1546}
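/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): the minimal shape of a tracer that register_tracer()
 * accepts.  The "example" names, the hook bodies, and the selftest
 * symbol are hypothetical; the struct tracer fields and the
 * boot-time registration mirror how the in-tree tracers register.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	/* start this tracer's own recording; 0 means success */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* undo whatever example_tracer_init() set up */
}

static struct tracer example_tracer = {
	.name		= "example",	/* must be shorter than MAX_TRACER_SIZE */
	.init		= example_tracer_init,
	.reset		= example_tracer_reset,
#ifdef CONFIG_FTRACE_STARTUP_TEST
	.selftest	= trace_selftest_startup_example,	/* hypothetical */
#endif
};

static __init int init_example_tracer(void)
{
	/* register_tracer() is __init here, so register during boot */
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);
#endif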
1547
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001548void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001549{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001550 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001551
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001552 if (!buffer)
1553 return;
1554
Steven Rostedtf6339032009-09-04 12:35:16 -04001555 ring_buffer_record_disable(buffer);
1556
1557 /* Make sure all commits have finished */
1558 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001559 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001560
1561 ring_buffer_record_enable(buffer);
1562}
1563
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001564void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001565{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001566 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001567 int cpu;
1568
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001569 if (!buffer)
1570 return;
1571
Steven Rostedt621968c2009-09-04 12:02:35 -04001572 ring_buffer_record_disable(buffer);
1573
1574 /* Make sure all commits have finished */
1575 synchronize_sched();
1576
Alexander Z Lam94571582013-08-02 18:36:16 -07001577 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001578
1579 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001580 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001581
1582 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001583}
1584
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001585/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001586void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001587{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001588 struct trace_array *tr;
1589
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001590 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001591 tracing_reset_online_cpus(&tr->trace_buffer);
1592#ifdef CONFIG_TRACER_MAX_TRACE
1593 tracing_reset_online_cpus(&tr->max_buffer);
1594#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001595 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001596}
1597
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001598#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001599#define NO_CMDLINE_MAP UINT_MAX
Dmitry Shmidtcb575f72015-10-28 10:45:04 -07001600static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001601static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001602struct saved_cmdlines_buffer {
1603 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1604 unsigned *map_cmdline_to_pid;
1605 unsigned cmdline_num;
1606 int cmdline_idx;
1607 char *saved_cmdlines;
1608};
1609static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001610
Steven Rostedt25b0b442008-05-12 21:21:00 +02001611/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001612static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001613
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001614static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001615{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001616 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1617}
1618
1619static inline void set_cmdline(int idx, const char *cmdline)
1620{
1621 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1622}
1623
1624static int allocate_cmdlines_buffer(unsigned int val,
1625 struct saved_cmdlines_buffer *s)
1626{
1627 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1628 GFP_KERNEL);
1629 if (!s->map_cmdline_to_pid)
1630 return -ENOMEM;
1631
1632 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1633 if (!s->saved_cmdlines) {
1634 kfree(s->map_cmdline_to_pid);
1635 return -ENOMEM;
1636 }
1637
1638 s->cmdline_idx = 0;
1639 s->cmdline_num = val;
1640 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1641 sizeof(s->map_pid_to_cmdline));
1642 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1643 val * sizeof(*s->map_cmdline_to_pid));
1644
1645 return 0;
1646}
1647
1648static int trace_create_savedcmd(void)
1649{
1650 int ret;
1651
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001652 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001653 if (!savedcmd)
1654 return -ENOMEM;
1655
1656 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1657 if (ret < 0) {
1658 kfree(savedcmd);
1659 savedcmd = NULL;
1660 return -ENOMEM;
1661 }
1662
1663 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001664}
1665
Carsten Emdeb5130b12009-09-13 01:43:07 +02001666int is_tracing_stopped(void)
1667{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001668 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001669}
1670
Steven Rostedt0f048702008-11-05 16:05:44 -05001671/**
1672 * tracing_start - quick start of the tracer
1673 *
1674 * If tracing is enabled but was stopped by tracing_stop,
1675 * this will start the tracer back up.
1676 */
1677void tracing_start(void)
1678{
1679 struct ring_buffer *buffer;
1680 unsigned long flags;
1681
1682 if (tracing_disabled)
1683 return;
1684
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001685 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1686 if (--global_trace.stop_count) {
1687 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001688 /* Someone screwed up their debugging */
1689 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001690 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001691 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001692 goto out;
1693 }
1694
Steven Rostedta2f80712010-03-12 19:56:00 -05001695 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001696 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001697
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001698 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001699 if (buffer)
1700 ring_buffer_record_enable(buffer);
1701
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001702#ifdef CONFIG_TRACER_MAX_TRACE
1703 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001704 if (buffer)
1705 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001706#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001707
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001708 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001709
Steven Rostedt0f048702008-11-05 16:05:44 -05001710 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001711 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1712}
1713
1714static void tracing_start_tr(struct trace_array *tr)
1715{
1716 struct ring_buffer *buffer;
1717 unsigned long flags;
1718
1719 if (tracing_disabled)
1720 return;
1721
1722 /* If global, we need to also start the max tracer */
1723 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1724 return tracing_start();
1725
1726 raw_spin_lock_irqsave(&tr->start_lock, flags);
1727
1728 if (--tr->stop_count) {
1729 if (tr->stop_count < 0) {
1730 /* Someone screwed up their debugging */
1731 WARN_ON_ONCE(1);
1732 tr->stop_count = 0;
1733 }
1734 goto out;
1735 }
1736
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001737 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001738 if (buffer)
1739 ring_buffer_record_enable(buffer);
1740
1741 out:
1742 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001743}
1744
1745/**
1746 * tracing_stop - quick stop of the tracer
1747 *
1748 * Light weight way to stop tracing. Use in conjunction with
1749 * tracing_start.
1750 */
1751void tracing_stop(void)
1752{
1753 struct ring_buffer *buffer;
1754 unsigned long flags;
1755
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001756 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1757 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001758 goto out;
1759
Steven Rostedta2f80712010-03-12 19:56:00 -05001760 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001761 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001762
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001763 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001764 if (buffer)
1765 ring_buffer_record_disable(buffer);
1766
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001767#ifdef CONFIG_TRACER_MAX_TRACE
1768 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001769 if (buffer)
1770 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001771#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001772
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001773 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001774
Steven Rostedt0f048702008-11-05 16:05:44 -05001775 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001776 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1777}
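/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): tracing_stop()/tracing_start() nest through stop_count,
 * so a paired stop/start is safe even if somebody else has already
 * stopped tracing.  The surrounding function is hypothetical.
 */
#if 0
static void example_inspect_trace(void)
{
	tracing_stop();		/* freeze the buffers; writers are disabled */
	/* ... read or dump the trace without new events racing in ... */
	tracing_start();	/* writers resume when the last stopper leaves */
}
#endif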
1778
1779static void tracing_stop_tr(struct trace_array *tr)
1780{
1781 struct ring_buffer *buffer;
1782 unsigned long flags;
1783
1784 /* If global, we need to also stop the max tracer */
1785 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1786 return tracing_stop();
1787
1788 raw_spin_lock_irqsave(&tr->start_lock, flags);
1789 if (tr->stop_count++)
1790 goto out;
1791
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001792 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001793 if (buffer)
1794 ring_buffer_record_disable(buffer);
1795
1796 out:
1797 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001798}
1799
Ingo Molnare309b412008-05-12 21:20:51 +02001800void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001801
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001802static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001803{
Carsten Emdea635cf02009-03-18 09:00:41 +01001804 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001805
1806 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001807 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001808
1809 /*
1810 * It's not the end of the world if we don't get
1811 * the lock, but we also don't want to spin
1812 * nor do we want to disable interrupts,
1813 * so if we miss here, then better luck next time.
1814 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001815 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001816 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001817
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001818 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001819 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001820 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001821
Carsten Emdea635cf02009-03-18 09:00:41 +01001822 /*
1823 * Check whether the cmdline buffer at idx has a pid
1824 * mapped. We are going to overwrite that entry so we
1825 * need to clear the map_pid_to_cmdline. Otherwise we
1826 * would read the new comm for the old pid.
1827 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001828 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001829 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001830 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001831
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001832 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1833 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001834
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001835 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001836 }
1837
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001838 set_cmdline(idx, tsk->comm);
Jamie Gennis13b625d2012-11-21 15:04:25 -08001839 saved_tgids[idx] = tsk->tgid;
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001840 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001841
1842 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001843}
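/*
 * Worked example of the two maps above (values invented): a task with
 * pid 42 and comm "bash" is saved and later looked up.
 *
 *   trace_save_cmdline(tsk):
 *     map_pid_to_cmdline[42] == NO_CMDLINE_MAP, so take the next slot:
 *       idx = (cmdline_idx + 1) % cmdline_num
 *     evict the slot's old pid (if any) from map_pid_to_cmdline,
 *     then map_cmdline_to_pid[idx] = 42,
 *          map_pid_to_cmdline[42] = idx,
 *          saved_cmdlines[idx * TASK_COMM_LEN] = "bash"
 *
 *   trace_find_cmdline(42, comm) copies "bash" back out, unless the
 *   slot has since been recycled for a newer pid, in which case the
 *   lookup falls back to "<...>".
 */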
1844
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001845static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001846{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001847 unsigned map;
1848
Steven Rostedt4ca530852009-03-16 19:20:15 -04001849 if (!pid) {
1850 strcpy(comm, "<idle>");
1851 return;
1852 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001853
Steven Rostedt74bf4072010-01-25 15:11:53 -05001854 if (WARN_ON_ONCE(pid < 0)) {
1855 strcpy(comm, "<XXX>");
1856 return;
1857 }
1858
Steven Rostedt4ca530852009-03-16 19:20:15 -04001859 if (pid > PID_MAX_DEFAULT) {
1860 strcpy(comm, "<...>");
1861 return;
1862 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001863
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001864 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001865 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001866 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001867 else
1868 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001869}
1870
1871void trace_find_cmdline(int pid, char comm[])
1872{
1873 preempt_disable();
1874 arch_spin_lock(&trace_cmdline_lock);
1875
1876 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001877
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001878 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001879 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001880}
1881
Jamie Gennis13b625d2012-11-21 15:04:25 -08001882int trace_find_tgid(int pid)
1883{
1884 unsigned map;
1885 int tgid;
1886
1887 preempt_disable();
1888 arch_spin_lock(&trace_cmdline_lock);
Dmitry Shmidtcb575f72015-10-28 10:45:04 -07001889 map = savedcmd->map_pid_to_cmdline[pid];
Jamie Gennis13b625d2012-11-21 15:04:25 -08001890 if (map != NO_CMDLINE_MAP)
1891 tgid = saved_tgids[map];
1892 else
1893 tgid = -1;
1894
1895 arch_spin_unlock(&trace_cmdline_lock);
1896 preempt_enable();
1897
1898 return tgid;
1899}
1900
Ingo Molnare309b412008-05-12 21:20:51 +02001901void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001902{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001903 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001904 return;
1905
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001906 if (!__this_cpu_read(trace_cmdline_save))
1907 return;
1908
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001909 if (trace_save_cmdline(tsk))
1910 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001911}
1912
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001913void
Steven Rostedt38697052008-10-01 13:14:09 -04001914tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1915 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001916{
1917 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001918
Steven Rostedt777e2082008-09-29 23:02:42 -04001919 entry->preempt_count = pc & 0xff;
1920 entry->pid = (tsk) ? tsk->pid : 0;
1921 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001922#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001923 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001924#else
1925 TRACE_FLAG_IRQS_NOSUPPORT |
1926#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01001927		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001928 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1929 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001930 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1931 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001932}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001933EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
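/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): decoding the flags packed above.  The characters follow
 * the latency-format columns of the trace output ('d' irqs-off,
 * 'h'/'s' hard/soft irq context, 'N' need-resched); the helper itself
 * is hypothetical.
 */
#if 0
static void example_decode_entry(struct trace_entry *entry)
{
	pr_info("pid=%d preempt=%u %c%c%c%c\n",
		entry->pid, entry->preempt_count,
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		(entry->flags & TRACE_FLAG_HARDIRQ) ? 'h' : '.',
		(entry->flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.',
		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.');
}
#endif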
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001935static __always_inline void
1936trace_event_setup(struct ring_buffer_event *event,
1937 int type, unsigned long flags, int pc)
1938{
1939 struct trace_entry *ent = ring_buffer_event_data(event);
1940
1941 tracing_generic_entry_update(ent, flags, pc);
1942 ent->type = type;
1943}
1944
Steven Rostedte77405a2009-09-02 14:17:06 -04001945struct ring_buffer_event *
1946trace_buffer_lock_reserve(struct ring_buffer *buffer,
1947 int type,
1948 unsigned long len,
1949 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001950{
1951 struct ring_buffer_event *event;
1952
Steven Rostedte77405a2009-09-02 14:17:06 -04001953 event = ring_buffer_lock_reserve(buffer, len);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001954 if (event != NULL)
1955 trace_event_setup(event, type, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001956
1957 return event;
1958}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001959
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001960DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1961DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1962static int trace_buffered_event_ref;
1963
1964/**
1965 * trace_buffered_event_enable - enable buffering events
1966 *
1967 * When events are being filtered, it is quicker to use a temporary
1968 * buffer to write the event data into if there's a likely chance
1969 * that it will not be committed. The discard of the ring buffer
1970 * is not as fast as committing, and is much slower than copying
1971 * a commit.
1972 *
1973 * When an event is to be filtered, allocate per cpu buffers to
1974 * write the event data into, and if the event is filtered and discarded
1975 * it is simply dropped, otherwise, the entire data is to be committed
1976 * in one shot.
1977 */
1978void trace_buffered_event_enable(void)
1979{
1980 struct ring_buffer_event *event;
1981 struct page *page;
1982 int cpu;
1983
1984 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1985
1986 if (trace_buffered_event_ref++)
1987 return;
1988
1989 for_each_tracing_cpu(cpu) {
1990 page = alloc_pages_node(cpu_to_node(cpu),
1991 GFP_KERNEL | __GFP_NORETRY, 0);
1992 if (!page)
1993 goto failed;
1994
1995 event = page_address(page);
1996 memset(event, 0, sizeof(*event));
1997
1998 per_cpu(trace_buffered_event, cpu) = event;
1999
2000 preempt_disable();
2001 if (cpu == smp_processor_id() &&
2002 this_cpu_read(trace_buffered_event) !=
2003 per_cpu(trace_buffered_event, cpu))
2004 WARN_ON_ONCE(1);
2005 preempt_enable();
2006 }
2007
2008 return;
2009 failed:
2010 trace_buffered_event_disable();
2011}
2012
2013static void enable_trace_buffered_event(void *data)
2014{
2015 /* Probably not needed, but do it anyway */
2016 smp_rmb();
2017 this_cpu_dec(trace_buffered_event_cnt);
2018}
2019
2020static void disable_trace_buffered_event(void *data)
2021{
2022 this_cpu_inc(trace_buffered_event_cnt);
2023}
2024
2025/**
2026 * trace_buffered_event_disable - disable buffering events
2027 *
2028 * When a filter is removed, it is faster to not use the buffered
2029 * events, and to commit directly into the ring buffer. Free up
2030 * the temp buffers when there are no more users. This requires
2031 * special synchronization with current events.
2032 */
2033void trace_buffered_event_disable(void)
2034{
2035 int cpu;
2036
2037 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2038
2039 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2040 return;
2041
2042 if (--trace_buffered_event_ref)
2043 return;
2044
2045 preempt_disable();
2046 /* For each CPU, set the buffer as used. */
2047 smp_call_function_many(tracing_buffer_mask,
2048 disable_trace_buffered_event, NULL, 1);
2049 preempt_enable();
2050
2051 /* Wait for all current users to finish */
2052 synchronize_sched();
2053
2054 for_each_tracing_cpu(cpu) {
2055 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2056 per_cpu(trace_buffered_event, cpu) = NULL;
2057 }
2058 /*
2059 * Make sure trace_buffered_event is NULL before clearing
2060 * trace_buffered_event_cnt.
2061 */
2062 smp_wmb();
2063
2064 preempt_disable();
2065 /* Do the work on each cpu */
2066 smp_call_function_many(tracing_buffer_mask,
2067 enable_trace_buffered_event, NULL, 1);
2068 preempt_enable();
2069}
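/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): the enable/disable pair above is reference counted and,
 * as the WARN_ON_ONCE()s document, expects event_mutex to be held.
 * The caller below is hypothetical.
 */
#if 0
static void example_toggle_event_filter(bool filtered)
{
	mutex_lock(&event_mutex);
	if (filtered)
		trace_buffered_event_enable();	/* first user allocates the pages */
	else
		trace_buffered_event_disable();	/* last user frees them again */
	mutex_unlock(&event_mutex);
}
#endif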
2070
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002071void
2072__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
2073{
2074 __this_cpu_write(trace_cmdline_save, true);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002075
2076 /* If this is the temp buffer, we need to commit fully */
2077 if (this_cpu_read(trace_buffered_event) == event) {
2078 /* Length is in event->array[0] */
2079 ring_buffer_write(buffer, event->array[0], &event->array[1]);
2080 /* Release the temp buffer */
2081 this_cpu_dec(trace_buffered_event_cnt);
2082 } else
2083 ring_buffer_unlock_commit(buffer, event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002084}
2085
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002086static struct ring_buffer *temp_buffer;
2087
Steven Rostedtef5580d2009-02-27 19:38:04 -05002088struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002089trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002090 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002091 int type, unsigned long len,
2092 unsigned long flags, int pc)
2093{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002094 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002095 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002096
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002097 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002098
2099 if ((trace_file->flags &
2100 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2101 (entry = this_cpu_read(trace_buffered_event))) {
2102 /* Try to use the per cpu buffer first */
2103 val = this_cpu_inc_return(trace_buffered_event_cnt);
2104 if (val == 1) {
2105 trace_event_setup(entry, type, flags, pc);
2106 entry->array[0] = len;
2107 return entry;
2108 }
2109 this_cpu_dec(trace_buffered_event_cnt);
2110 }
2111
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002112 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002113 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002114 /*
2115 * If tracing is off, but we have triggers enabled
2116 * we still need to look at the event data. Use the temp_buffer
 2117	 * to store the trace event for the trigger to use. It's recursion
 2118	 * safe and will not be recorded anywhere.
2119 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002120 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002121 *current_rb = temp_buffer;
2122 entry = trace_buffer_lock_reserve(*current_rb,
2123 type, len, flags, pc);
2124 }
2125 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002126}
2127EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2128
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002129void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2130 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002131 struct ring_buffer_event *event,
2132 unsigned long flags, int pc,
2133 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002134{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002135 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002136
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002137 /*
2138 * If regs is not set, then skip the following callers:
2139 * trace_buffer_unlock_commit_regs
2140 * event_trigger_unlock_commit
2141 * trace_event_buffer_commit
2142 * trace_event_raw_event_sched_switch
2143 * Note, we can still get here via blktrace, wakeup tracer
2144 * and mmiotrace, but that's ok if they lose a function or
 2145	 * two. They are not that meaningful.
2146 */
2147 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002148 ftrace_trace_userstack(buffer, flags, pc);
2149}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002150
Ingo Molnare309b412008-05-12 21:20:51 +02002151void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002152trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002153 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2154 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002155{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002156 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002157 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002158 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002159 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002160
Steven Rostedte77405a2009-09-02 14:17:06 -04002161 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002162 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002163 if (!event)
2164 return;
2165 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002166 entry->ip = ip;
2167 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002168
Tom Zanussif306cc82013-10-24 08:34:17 -05002169 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002170 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002171}
2172
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002173#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002174
2175#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2176struct ftrace_stack {
2177 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2178};
2179
2180static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2181static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2182
Steven Rostedte77405a2009-09-02 14:17:06 -04002183static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002184 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002185 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002186{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002187 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002188 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002189 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002190 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002191 int use_stack;
2192 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002193
2194 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002195 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002196
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002197 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002198 * Add two, for this function and the call to save_stack_trace()
2199 * If regs is set, then these functions will not be in the way.
2200 */
2201 if (!regs)
2202 trace.skip += 2;
2203
2204 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002205 * Since events can happen in NMIs there's no safe way to
2206 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2207 * or NMI comes in, it will just have to use the default
 2208	 * FTRACE_STACK_ENTRIES.
2209 */
2210 preempt_disable_notrace();
2211
Shan Wei82146522012-11-19 13:21:01 +08002212 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002213 /*
2214 * We don't need any atomic variables, just a barrier.
2215 * If an interrupt comes in, we don't care, because it would
2216 * have exited and put the counter back to what we want.
2217 * We just need a barrier to keep gcc from moving things
2218 * around.
2219 */
2220 barrier();
2221 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002222 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002223 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2224
2225 if (regs)
2226 save_stack_trace_regs(regs, &trace);
2227 else
2228 save_stack_trace(&trace);
2229
2230 if (trace.nr_entries > size)
2231 size = trace.nr_entries;
2232 } else
2233 /* From now on, use_stack is a boolean */
2234 use_stack = 0;
2235
2236 size *= sizeof(unsigned long);
2237
2238 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
2239 sizeof(*entry) + size, flags, pc);
2240 if (!event)
2241 goto out;
2242 entry = ring_buffer_event_data(event);
2243
2244 memset(&entry->caller, 0, size);
2245
2246 if (use_stack)
2247 memcpy(&entry->caller, trace.entries,
2248 trace.nr_entries * sizeof(unsigned long));
2249 else {
2250 trace.max_entries = FTRACE_STACK_ENTRIES;
2251 trace.entries = entry->caller;
2252 if (regs)
2253 save_stack_trace_regs(regs, &trace);
2254 else
2255 save_stack_trace(&trace);
2256 }
2257
2258 entry->size = trace.nr_entries;
2259
Tom Zanussif306cc82013-10-24 08:34:17 -05002260 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002261 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002262
2263 out:
2264 /* Again, don't let gcc optimize things here */
2265 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002266 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002267 preempt_enable_notrace();
2268
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002269}
2270
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002271static inline void ftrace_trace_stack(struct trace_array *tr,
2272 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002273 unsigned long flags,
2274 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002275{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002276 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002277 return;
2278
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002279 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002280}
2281
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002282void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2283 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002284{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002285 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002286}
2287
Steven Rostedt03889382009-12-11 09:48:22 -05002288/**
2289 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002290 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002291 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002292void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002293{
2294 unsigned long flags;
2295
2296 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002297 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002298
2299 local_save_flags(flags);
2300
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002301 /*
2302 * Skip 3 more, seems to get us at the caller of
2303 * this function.
2304 */
2305 skip += 3;
2306 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2307 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002308}
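/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): recording a backtrace from a suspicious code path.  The
 * caller and its condition are hypothetical.
 */
#if 0
static void example_check_state(bool something_odd)
{
	if (unlikely(something_odd))
		trace_dump_stack(0);	/* 0: skip only the helper frames */
}
#endif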
2309
Steven Rostedt91e86e52010-11-10 12:56:12 +01002310static DEFINE_PER_CPU(int, user_stack_count);
2311
Steven Rostedte77405a2009-09-02 14:17:06 -04002312void
2313ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002314{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002315 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002316 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002317 struct userstack_entry *entry;
2318 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002319
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002320 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002321 return;
2322
Steven Rostedtb6345872010-03-12 20:03:30 -05002323 /*
2324 * NMIs can not handle page faults, even with fix ups.
2325 * The save user stack can (and often does) fault.
2326 */
2327 if (unlikely(in_nmi()))
2328 return;
2329
Steven Rostedt91e86e52010-11-10 12:56:12 +01002330 /*
2331 * prevent recursion, since the user stack tracing may
2332 * trigger other kernel events.
2333 */
2334 preempt_disable();
2335 if (__this_cpu_read(user_stack_count))
2336 goto out;
2337
2338 __this_cpu_inc(user_stack_count);
2339
Steven Rostedte77405a2009-09-02 14:17:06 -04002340 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002341 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002342 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002343 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002344 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002345
Steven Rostedt48659d32009-09-11 11:36:23 -04002346 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002347 memset(&entry->caller, 0, sizeof(entry->caller));
2348
2349 trace.nr_entries = 0;
2350 trace.max_entries = FTRACE_STACK_ENTRIES;
2351 trace.skip = 0;
2352 trace.entries = entry->caller;
2353
2354 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002355 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002356 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002357
Li Zefan1dbd1952010-12-09 15:47:56 +08002358 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002359 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002360 out:
2361 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002362}
2363
Hannes Eder4fd27352009-02-10 19:44:12 +01002364#ifdef UNUSED
2365static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002366{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002367 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002368}
Hannes Eder4fd27352009-02-10 19:44:12 +01002369#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002370
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002371#endif /* CONFIG_STACKTRACE */
2372
Steven Rostedt07d777f2011-09-22 14:01:55 -04002373/* created for use with alloc_percpu */
2374struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002375 int nesting;
2376 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002377};
2378
2379static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002380
2381/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002382 * This allows for lockless recording. If we're nested too deeply, then
2383 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002384 */
2385static char *get_trace_buf(void)
2386{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002387 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002388
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002389 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002390 return NULL;
2391
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002392 return &buffer->buffer[buffer->nesting++][0];
2393}
2394
2395static void put_trace_buf(void)
2396{
2397 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002398}
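/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): every successful get_trace_buf() must be paired with
 * put_trace_buf() on the same CPU, which is why the real callers
 * (e.g. trace_vbprintk() below) disable preemption around the pair.
 */
#if 0
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		/* format up to TRACE_BUF_SIZE bytes into tbuffer ... */
		put_trace_buf();	/* drop this CPU's nesting count */
	}
	preempt_enable_notrace();
#endif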
2399
2400static int alloc_percpu_trace_buffer(void)
2401{
2402 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002403
2404 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002405 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2406 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002407
2408 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002409 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002410}
2411
Steven Rostedt81698832012-10-11 10:15:05 -04002412static int buffers_allocated;
2413
Steven Rostedt07d777f2011-09-22 14:01:55 -04002414void trace_printk_init_buffers(void)
2415{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002416 if (buffers_allocated)
2417 return;
2418
2419 if (alloc_percpu_trace_buffer())
2420 return;
2421
Steven Rostedt2184db42014-05-28 13:14:40 -04002422 /* trace_printk() is for debug use only. Don't use it in production. */
2423
Joe Perchesa395d6a2016-03-22 14:28:09 -07002424 pr_warn("\n");
2425 pr_warn("**********************************************************\n");
2426 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2427 pr_warn("** **\n");
2428 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2429 pr_warn("** **\n");
2430 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2431 pr_warn("** unsafe for production use. **\n");
2432 pr_warn("** **\n");
2433 pr_warn("** If you see this message and you are not debugging **\n");
2434 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2435 pr_warn("** **\n");
2436 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2437 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002438
Steven Rostedtb382ede62012-10-10 21:44:34 -04002439 /* Expand the buffers to set size */
2440 tracing_update_buffers();
2441
Steven Rostedt07d777f2011-09-22 14:01:55 -04002442 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002443
2444 /*
2445 * trace_printk_init_buffers() can be called by modules.
2446 * If that happens, then we need to start cmdline recording
2447 * directly here. If the global_trace.buffer is already
2448 * allocated here, then this was called by module code.
2449 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002450 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002451 tracing_start_cmdline_record();
2452}
2453
2454void trace_printk_start_comm(void)
2455{
2456 /* Start tracing comms if trace printk is set */
2457 if (!buffers_allocated)
2458 return;
2459 tracing_start_cmdline_record();
2460}
2461
2462static void trace_printk_start_stop_comm(int enabled)
2463{
2464 if (!buffers_allocated)
2465 return;
2466
2467 if (enabled)
2468 tracing_start_cmdline_record();
2469 else
2470 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002471}
2472
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002473/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002474 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002475 *
2476 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002477int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002478{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002479 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002480 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002481 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002482 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002483 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002484 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002485 char *tbuffer;
2486 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002487
2488 if (unlikely(tracing_selftest_running || tracing_disabled))
2489 return 0;
2490
2491 /* Don't pollute graph traces with trace_vprintk internals */
2492 pause_graph_tracing();
2493
2494 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002495 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002496
Steven Rostedt07d777f2011-09-22 14:01:55 -04002497 tbuffer = get_trace_buf();
2498 if (!tbuffer) {
2499 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002500 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002501 }
2502
2503 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2504
2505 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002506 goto out;
2507
Steven Rostedt07d777f2011-09-22 14:01:55 -04002508 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002509 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002510 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002511 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2512 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002513 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002514 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002515 entry = ring_buffer_event_data(event);
2516 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002517 entry->fmt = fmt;
2518
Steven Rostedt07d777f2011-09-22 14:01:55 -04002519 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002520 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002521 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002522 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002523 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002524
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002525out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002526 put_trace_buf();
2527
2528out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002529 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002530 unpause_graph_tracing();
2531
2532 return len;
2533}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002534EXPORT_SYMBOL_GPL(trace_vbprintk);
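/*
 * Illustrative sketch (not part of this file, guarded out so it never
 * compiles): trace_vbprintk() is the path taken by the trace_printk()
 * macro when the format is a build-time constant with arguments; only
 * the fmt pointer and the binary argument values are recorded.  The
 * call site and its values are invented.
 */
#if 0
static void example_debug_site(int ctx, unsigned long old_state,
			       unsigned long new_state)
{
	trace_printk("ctx %d: state %lu -> %lu\n", ctx, old_state, new_state);
}
#endif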
2535
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002536static int
2537__trace_array_vprintk(struct ring_buffer *buffer,
2538 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002539{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002540 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002541 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002542 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002543 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002544 unsigned long flags;
2545 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002546
2547 if (tracing_disabled || tracing_selftest_running)
2548 return 0;
2549
Steven Rostedt07d777f2011-09-22 14:01:55 -04002550 /* Don't pollute graph traces with trace_vprintk internals */
2551 pause_graph_tracing();
2552
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002553 pc = preempt_count();
2554 preempt_disable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002556
2557 tbuffer = get_trace_buf();
2558 if (!tbuffer) {
2559 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002560 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002561 }
2562
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002563 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002564
Steven Rostedt07d777f2011-09-22 14:01:55 -04002565 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002566 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002567 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002568 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002569 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002570 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002571 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002572 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002573
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002574 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002575 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002576 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002577 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002578 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002579
2580out:
2581 put_trace_buf();
2582
2583out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002584 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002585 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002586
2587 return len;
2588}
Steven Rostedt659372d2009-09-03 19:11:07 -04002589
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002590int trace_array_vprintk(struct trace_array *tr,
2591 unsigned long ip, const char *fmt, va_list args)
2592{
2593 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2594}
2595
2596int trace_array_printk(struct trace_array *tr,
2597 unsigned long ip, const char *fmt, ...)
2598{
2599 int ret;
2600 va_list ap;
2601
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002602 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002603 return 0;
2604
2605 va_start(ap, fmt);
2606 ret = trace_array_vprintk(tr, ip, fmt, ap);
2607 va_end(ap);
2608 return ret;
2609}
2610
2611int trace_array_printk_buf(struct ring_buffer *buffer,
2612 unsigned long ip, const char *fmt, ...)
2613{
2614 int ret;
2615 va_list ap;
2616
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002617 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002618 return 0;
2619
2620 va_start(ap, fmt);
2621 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2622 va_end(ap);
2623 return ret;
2624}
2625
Steven Rostedt659372d2009-09-03 19:11:07 -04002626int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2627{
Steven Rostedta813a152009-10-09 01:41:35 -04002628 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002629}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002630EXPORT_SYMBOL_GPL(trace_vprintk);
2631
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002632static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002633{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002634 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2635
Steven Rostedt5a90f572008-09-03 17:42:51 -04002636 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002637 if (buf_iter)
2638 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002639}
2640
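/*
 * Peek at the next event on @cpu without consuming it. Iterator-based
 * readers (the "trace" file) peek through the buffer iterator, while
 * consuming readers peek at the live ring buffer; the event's size is
 * saved in iter->ent_size for the caller.
 */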
Ingo Molnare309b412008-05-12 21:20:51 +02002641static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002642peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2643 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002644{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002645 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002646 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002647
Steven Rostedtd7690412008-10-01 00:29:53 -04002648 if (buf_iter)
2649 event = ring_buffer_iter_peek(buf_iter, ts);
2650 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002651 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002652 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002653
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002654 if (event) {
2655 iter->ent_size = ring_buffer_event_length(event);
2656 return ring_buffer_event_data(event);
2657 }
2658 iter->ent_size = 0;
2659 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002660}
Steven Rostedtd7690412008-10-01 00:29:53 -04002661
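/*
 * Of all the per-cpu buffers (or just the one for per_cpu trace
 * files), find the event with the oldest timestamp and report its
 * CPU, timestamp, and lost-event count back to the caller.
 */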
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002662static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002663__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2664 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002665{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002666 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002667 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002668 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002669 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002670 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002671 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002672 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002673 int cpu;
2674
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002675 /*
2676	 * If we are in a per_cpu trace file, don't bother iterating over
2677	 * all the CPUs; peek at that one CPU's buffer directly.
2678 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002679 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002680 if (ring_buffer_empty_cpu(buffer, cpu_file))
2681 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002682 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002683 if (ent_cpu)
2684 *ent_cpu = cpu_file;
2685
2686 return ent;
2687 }
2688
Steven Rostedtab464282008-05-12 21:21:00 +02002689 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002690
2691 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002692 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002693
Steven Rostedtbc21b472010-03-31 19:49:26 -04002694 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002695
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002696 /*
2697 * Pick the entry with the smallest timestamp:
2698 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002699 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002700 next = ent;
2701 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002702 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002703 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002704 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002705 }
2706 }
2707
Steven Rostedt12b5da32012-03-27 10:43:28 -04002708 iter->ent_size = next_size;
2709
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002710 if (ent_cpu)
2711 *ent_cpu = next_cpu;
2712
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002713 if (ent_ts)
2714 *ent_ts = next_ts;
2715
Steven Rostedtbc21b472010-03-31 19:49:26 -04002716 if (missing_events)
2717 *missing_events = next_lost;
2718
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002719 return next;
2720}
2721
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002722/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002723struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2724 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002725{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002726 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002727}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002728
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002729/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002730void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002731{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002732 iter->ent = __find_next_entry(iter, &iter->cpu,
2733 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002734
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002735 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002736 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002737
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002738 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002739}
2740
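/* Consume the event the iterator is currently on, noting lost events. */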
Ingo Molnare309b412008-05-12 21:20:51 +02002741static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002742{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002743 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002744 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002745}
2746
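/*
 * seq_file ->next() callback: advance the iterator to the entry at
 * position *pos. The walk is forward-only; a position behind the
 * current index returns NULL.
 */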
Ingo Molnare309b412008-05-12 21:20:51 +02002747static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002748{
2749 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002750 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002751 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002752
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002753 WARN_ON_ONCE(iter->leftover);
2754
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002755 (*pos)++;
2756
2757 /* can't go backwards */
2758 if (iter->idx > i)
2759 return NULL;
2760
2761 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002762 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002763 else
2764 ent = iter;
2765
2766 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002767 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002768
2769 iter->pos = *pos;
2770
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002771 return ent;
2772}
2773
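/*
 * Rewind the per-cpu buffer iterator, then skip past any events
 * stamped before the buffer's time_start, keeping a count of the
 * skipped entries so the statistics stay accurate.
 */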
Jason Wessel955b61e2010-08-05 09:22:23 -05002774void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002775{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002776 struct ring_buffer_event *event;
2777 struct ring_buffer_iter *buf_iter;
2778 unsigned long entries = 0;
2779 u64 ts;
2780
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002781 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002782
Steven Rostedt6d158a82012-06-27 20:46:14 -04002783 buf_iter = trace_buffer_iter(iter, cpu);
2784 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002785 return;
2786
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002787 ring_buffer_iter_reset(buf_iter);
2788
2789 /*
2790	 * With the max latency tracers, we could have the case that
2791	 * a reset never took place on a CPU. This is evident when an
2792	 * event's timestamp is before the start of the buffer.
2793 */
2794 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002795 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002796 break;
2797 entries++;
2798 ring_buffer_read(buf_iter, NULL);
2799 }
2800
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002801 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002802}
2803
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002804/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002805 * The current tracer is copied to avoid using a global lock
2806 * for the entire read.
2807 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002808static void *s_start(struct seq_file *m, loff_t *pos)
2809{
2810 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002811 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002812 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002813 void *p = NULL;
2814 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002815 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002816
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002817 /*
2818	 * Copy the tracer to avoid using a global lock all around.
2819	 * iter->trace is a copy of current_trace; the name pointer can
2820	 * be compared instead of calling strcmp(), as iter->trace->name
2821	 * will point to the same string as current_trace->name.
2822 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002823 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002824 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2825 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002826 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002827
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002828#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002829 if (iter->snapshot && iter->trace->use_max_tr)
2830 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002831#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002832
2833 if (!iter->snapshot)
2834 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002835
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002836 if (*pos != iter->pos) {
2837 iter->ent = NULL;
2838 iter->cpu = 0;
2839 iter->idx = -1;
2840
Steven Rostedtae3b5092013-01-23 15:22:59 -05002841 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002842 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002843 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002844 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002845 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002846
Lai Jiangshanac91d852010-03-02 17:54:50 +08002847 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002848 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2849 ;
2850
2851 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002852 /*
2853 * If we overflowed the seq_file before, then we want
2854 * to just reuse the trace_seq buffer again.
2855 */
2856 if (iter->leftover)
2857 p = iter;
2858 else {
2859 l = *pos - 1;
2860 p = s_next(m, p, &l);
2861 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002862 }
2863
Lai Jiangshan4f535962009-05-18 19:35:34 +08002864 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002865 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002866 return p;
2867}
2868
2869static void s_stop(struct seq_file *m, void *p)
2870{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002871 struct trace_iterator *iter = m->private;
2872
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002873#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002874 if (iter->snapshot && iter->trace->use_max_tr)
2875 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002876#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002877
2878 if (!iter->snapshot)
2879 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002880
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002881 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002882 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002883}
2884
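/*
 * Add up the events of every per-cpu buffer: @entries is what still
 * sits in the buffers, while @total also includes events lost to
 * buffer overruns.
 */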
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002885static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002886get_total_entries(struct trace_buffer *buf,
2887 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002888{
2889 unsigned long count;
2890 int cpu;
2891
2892 *total = 0;
2893 *entries = 0;
2894
2895 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002896 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002897 /*
2898		 * If this buffer has skipped entries, then we hold all
2899		 * entries for the trace, and we need to ignore the
2900		 * ones recorded before the buffer's start time stamp.
2901 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002902 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2903 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002904 /* total is the same as the entries */
2905 *total += count;
2906 } else
2907 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002908 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002909 *entries += count;
2910 }
2911}
2912
Ingo Molnare309b412008-05-12 21:20:51 +02002913static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002914{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002915	seq_puts(m, "#                  _------=> CPU#            \n"
2916		    "#                 / _-----=> irqs-off        \n"
2917		    "#                | / _----=> need-resched    \n"
2918		    "#                || / _---=> hardirq/softirq \n"
2919		    "#                ||| / _--=> preempt-depth   \n"
2920		    "#                |||| /     delay            \n"
2921		    "#  cmd     pid   ||||| time  |   caller      \n"
2922		    "#     \\   /      |||||  \\    |   /         \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923}
2924
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002925static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002926{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002927 unsigned long total;
2928 unsigned long entries;
2929
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002930 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002931 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2932 entries, total, num_online_cpus());
2933 seq_puts(m, "#\n");
2934}
2935
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002936static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002937{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002938 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002939	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2940		    "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002941}
2942
Jamie Gennis13b625d2012-11-21 15:04:25 -08002943static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
2944{
2945 print_event_info(buf, m);
2946	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
2947	seq_puts(m, "#              | |        |      |          |         |\n");
2948}
2949
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002950static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002951{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002952 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002953	seq_puts(m, "#                              _-----=> irqs-off\n"
2954		    "#                             / _----=> need-resched\n"
2955		    "#                            | / _---=> hardirq/softirq\n"
2956		    "#                            || / _--=> preempt-depth\n"
2957		    "#                            ||| /     delay\n"
2958		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2959		    "#              | |       |   ||||       |         |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002960}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002961
Jamie Gennis13b625d2012-11-21 15:04:25 -08002962static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
2963{
2964 print_event_info(buf, m);
2965	seq_puts(m, "#                                      _-----=> irqs-off\n");
2966	seq_puts(m, "#                                     / _----=> need-resched\n");
2967	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
2968	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
2969	seq_puts(m, "#                                    ||| /     delay\n");
2970	seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2971	seq_puts(m, "#              | |        |      |   ||||       |         |\n");
2972}
2973
Jiri Olsa62b915f2010-04-02 19:01:22 +02002974void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002975print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2976{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002977 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002978 struct trace_buffer *buf = iter->trace_buffer;
2979 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002980 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002981 unsigned long entries;
2982 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002983 const char *name = "preemption";
2984
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002985 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002986
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002987 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002988
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002989 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002990 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002991 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002992 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002993 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002994 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002995 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002996 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002997 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002998 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002999#if defined(CONFIG_PREEMPT_NONE)
3000 "server",
3001#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3002 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003003#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003004 "preempt",
3005#else
3006 "unknown",
3007#endif
3008 /* These are reserved for later use */
3009 0, 0, 0, 0);
3010#ifdef CONFIG_SMP
3011 seq_printf(m, " #P:%d)\n", num_online_cpus());
3012#else
3013 seq_puts(m, ")\n");
3014#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003015 seq_puts(m, "# -----------------\n");
3016 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003017 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003018 data->comm, data->pid,
3019 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003020 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003021 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003022
3023 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003024 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003025 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3026 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003027 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003028 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3029 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003030 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003031 }
3032
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003033 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003034}
3035
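/*
 * When annotations are enabled and the buffers saw overruns at open
 * time, print a one-time "buffer started" banner when output first
 * switches to a CPU, so readers can tell where that CPU's data
 * actually begins.
 */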
Steven Rostedta3097202008-11-07 22:36:02 -05003036static void test_cpu_buff_start(struct trace_iterator *iter)
3037{
3038 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003039 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003040
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003041 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003042 return;
3043
3044 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3045 return;
3046
Sasha Levin919cd972015-09-04 12:45:56 -04003047 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003048 return;
3049
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003050 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003051 return;
3052
Sasha Levin919cd972015-09-04 12:45:56 -04003053 if (iter->started)
3054 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003055
3056	/* Don't print the "buffer started" banner for the first entry of the trace */
3057 if (iter->idx > 1)
3058 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3059 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003060}
3061
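/*
 * Default human-readable output: print the context columns (unless
 * disabled) and then hand off to the event's registered trace()
 * callback.
 */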
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003062static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003064 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003065 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003066 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003067 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003068 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003069
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003070 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003071
Steven Rostedta3097202008-11-07 22:36:02 -05003072 test_cpu_buff_start(iter);
3073
Steven Rostedtf633cef2008-12-23 23:24:13 -05003074 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003075
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003076 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003077 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3078 trace_print_lat_context(iter);
3079 else
3080 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003081 }
3082
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003083 if (trace_seq_has_overflowed(s))
3084 return TRACE_TYPE_PARTIAL_LINE;
3085
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003086 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003087 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003088
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003089 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003090
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003091 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003092}
3093
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003094static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003095{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003096 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003097 struct trace_seq *s = &iter->seq;
3098 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003099 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003100
3101 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003102
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003103 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003104 trace_seq_printf(s, "%d %d %llu ",
3105 entry->pid, iter->cpu, iter->ts);
3106
3107 if (trace_seq_has_overflowed(s))
3108 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003109
Steven Rostedtf633cef2008-12-23 23:24:13 -05003110 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003111 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003112 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003113
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003114 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003115
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003116 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003117}
3118
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003119static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003120{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003121 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003122 struct trace_seq *s = &iter->seq;
3123 unsigned char newline = '\n';
3124 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003125 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003126
3127 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003128
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003129 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003130 SEQ_PUT_HEX_FIELD(s, entry->pid);
3131 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3132 SEQ_PUT_HEX_FIELD(s, iter->ts);
3133 if (trace_seq_has_overflowed(s))
3134 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003135 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003136
Steven Rostedtf633cef2008-12-23 23:24:13 -05003137 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003138 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003139 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003140 if (ret != TRACE_TYPE_HANDLED)
3141 return ret;
3142 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003143
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003144 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003145
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003146 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003147}
3148
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003149static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003150{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003151 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003152 struct trace_seq *s = &iter->seq;
3153 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003154 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003155
3156 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003157
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003158 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003159 SEQ_PUT_FIELD(s, entry->pid);
3160 SEQ_PUT_FIELD(s, iter->cpu);
3161 SEQ_PUT_FIELD(s, iter->ts);
3162 if (trace_seq_has_overflowed(s))
3163 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003164 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003165
Steven Rostedtf633cef2008-12-23 23:24:13 -05003166 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003167 return event ? event->funcs->binary(iter, 0, event) :
3168 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003169}
3170
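/* Return 1 if there is nothing left to read in any of the buffers. */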
Jiri Olsa62b915f2010-04-02 19:01:22 +02003171int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003172{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003173 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003174 int cpu;
3175
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003176 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003177 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003178 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003179 buf_iter = trace_buffer_iter(iter, cpu);
3180 if (buf_iter) {
3181 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003182 return 0;
3183 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003184 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003185 return 0;
3186 }
3187 return 1;
3188 }
3189
Steven Rostedtab464282008-05-12 21:21:00 +02003190 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003191 buf_iter = trace_buffer_iter(iter, cpu);
3192 if (buf_iter) {
3193 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003194 return 0;
3195 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003196 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003197 return 0;
3198 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003199 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003200
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003201 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003202}
3203
Lai Jiangshan4f535962009-05-18 19:35:34 +08003204/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003205enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003206{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003207 struct trace_array *tr = iter->tr;
3208 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003209 enum print_line_t ret;
3210
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003211 if (iter->lost_events) {
3212 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3213 iter->cpu, iter->lost_events);
3214 if (trace_seq_has_overflowed(&iter->seq))
3215 return TRACE_TYPE_PARTIAL_LINE;
3216 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003217
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003218 if (iter->trace && iter->trace->print_line) {
3219 ret = iter->trace->print_line(iter);
3220 if (ret != TRACE_TYPE_UNHANDLED)
3221 return ret;
3222 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003223
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003224 if (iter->ent->type == TRACE_BPUTS &&
3225 trace_flags & TRACE_ITER_PRINTK &&
3226 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3227 return trace_print_bputs_msg_only(iter);
3228
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003229 if (iter->ent->type == TRACE_BPRINT &&
3230 trace_flags & TRACE_ITER_PRINTK &&
3231 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003232 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003233
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003234 if (iter->ent->type == TRACE_PRINT &&
3235 trace_flags & TRACE_ITER_PRINTK &&
3236 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003237 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003238
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003239 if (trace_flags & TRACE_ITER_BIN)
3240 return print_bin_fmt(iter);
3241
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003242 if (trace_flags & TRACE_ITER_HEX)
3243 return print_hex_fmt(iter);
3244
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003245 if (trace_flags & TRACE_ITER_RAW)
3246 return print_raw_fmt(iter);
3247
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003248 return print_trace_fmt(iter);
3249}
3250
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003251void trace_latency_header(struct seq_file *m)
3252{
3253 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003254 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003255
3256 /* print nothing if the buffers are empty */
3257 if (trace_empty(iter))
3258 return;
3259
3260 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3261 print_trace_header(m, iter);
3262
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003263 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003264 print_lat_help_header(m);
3265}
3266
Jiri Olsa62b915f2010-04-02 19:01:22 +02003267void trace_default_header(struct seq_file *m)
3268{
3269 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003270 struct trace_array *tr = iter->tr;
3271 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003272
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003273 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3274 return;
3275
Jiri Olsa62b915f2010-04-02 19:01:22 +02003276 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3277 /* print nothing if the buffers are empty */
3278 if (trace_empty(iter))
3279 return;
3280 print_trace_header(m, iter);
3281 if (!(trace_flags & TRACE_ITER_VERBOSE))
3282 print_lat_help_header(m);
3283 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003284 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3285 if (trace_flags & TRACE_ITER_IRQ_INFO)
Jamie Gennis13b625d2012-11-21 15:04:25 -08003286 if (trace_flags & TRACE_ITER_TGID)
3287 print_func_help_header_irq_tgid(iter->trace_buffer, m);
3288 else
3289 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003290 else
Jamie Gennis13b625d2012-11-21 15:04:25 -08003291 if (trace_flags & TRACE_ITER_TGID)
3292 print_func_help_header_tgid(iter->trace_buffer, m);
3293 else
3294 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003295 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003296 }
3297}
3298
Steven Rostedte0a413f2011-09-29 21:26:16 -04003299static void test_ftrace_alive(struct seq_file *m)
3300{
3301 if (!ftrace_is_dead())
3302 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003303 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3304 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003305}
3306
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003307#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003308static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003309{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003310	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3311		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3312		    "#                      Takes a snapshot of the main buffer.\n"
3313		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3314		    "#                      (Doesn't have to be '2' works with any number that\n"
3315		    "#                       is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003316}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003317
3318static void show_snapshot_percpu_help(struct seq_file *m)
3319{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003320 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003321#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003322	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3323		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003324#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003325	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3326		    "#                     Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003327#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003328	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3329		    "#                      (Doesn't have to be '2' works with any number that\n"
3330		    "#                       is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003331}
3332
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003333static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3334{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003335 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003336 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003337 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003338 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003339
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003340 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003341 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3342 show_snapshot_main_help(m);
3343 else
3344 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003345}
3346#else
3347/* Should never be called */
3348static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3349#endif
3350
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003351static int s_show(struct seq_file *m, void *v)
3352{
3353 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003354 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003355
3356 if (iter->ent == NULL) {
3357 if (iter->tr) {
3358 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3359 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003360 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003361 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003362 if (iter->snapshot && trace_empty(iter))
3363 print_snapshot_help(m, iter);
3364 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003365 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003366 else
3367 trace_default_header(m);
3368
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003369 } else if (iter->leftover) {
3370 /*
3371 * If we filled the seq_file buffer earlier, we
3372 * want to just show it now.
3373 */
3374 ret = trace_print_seq(m, &iter->seq);
3375
3376 /* ret should this time be zero, but you never know */
3377 iter->leftover = ret;
3378
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003379 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003380 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003381 ret = trace_print_seq(m, &iter->seq);
3382 /*
3383 * If we overflow the seq_file buffer, then it will
3384 * ask us for this data again at start up.
3385 * Use that instead.
3386 * ret is 0 if seq_file write succeeded.
3387 * -1 otherwise.
3388 */
3389 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003390 }
3391
3392 return 0;
3393}
3394
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003395/*
3396 * Should be used after trace_array_get(); trace_types_lock
3397 * ensures that i_cdev was already initialized.
3398 */
3399static inline int tracing_get_cpu(struct inode *inode)
3400{
3401 if (inode->i_cdev) /* See trace_create_cpu_file() */
3402 return (long)inode->i_cdev - 1;
3403 return RING_BUFFER_ALL_CPUS;
3404}
3405
James Morris88e9d342009-09-22 16:43:43 -07003406static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003407 .start = s_start,
3408 .next = s_next,
3409 .stop = s_stop,
3410 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003411};
3412
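/*
 * Set up a trace_iterator for a read of the "trace" file: copy the
 * current tracer, select the main or max/snapshot buffer, stop
 * tracing (unless this is the snapshot file), and prepare a ring
 * buffer iterator for each CPU being read.
 */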
Ingo Molnare309b412008-05-12 21:20:51 +02003413static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003414__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003415{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003416 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003417 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003418 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003419
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003420 if (tracing_disabled)
3421 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003422
Jiri Olsa50e18b92012-04-25 10:23:39 +02003423 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003424 if (!iter)
3425 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003426
Gil Fruchter72917232015-06-09 10:32:35 +03003427 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003428 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003429 if (!iter->buffer_iter)
3430 goto release;
3431
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003432 /*
3433 * We make a copy of the current tracer to avoid concurrent
3434	 * changes to it while we are reading.
3435 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003436 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003437 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003438 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003439 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003440
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003441 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003442
Li Zefan79f55992009-06-15 14:58:26 +08003443 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003444 goto fail;
3445
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003446 iter->tr = tr;
3447
3448#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003449 /* Currently only the top directory has a snapshot */
3450 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003451 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003452 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003453#endif
3454 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003455 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003456 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003457 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003458 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003459
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003460 /* Notify the tracer early; before we stop tracing. */
3461 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003462 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003463
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003464 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003465 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003466 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3467
David Sharp8be07092012-11-13 12:18:22 -08003468 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003469 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003470 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3471
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003472 /* stop the trace while dumping if we are not opening "snapshot" */
3473 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003474 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003475
Steven Rostedtae3b5092013-01-23 15:22:59 -05003476 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003477 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003478 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003479 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003480 }
3481 ring_buffer_read_prepare_sync();
3482 for_each_tracing_cpu(cpu) {
3483 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003484 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003485 }
3486 } else {
3487 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003488 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003489 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003490 ring_buffer_read_prepare_sync();
3491 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003492 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003493 }
3494
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003495 mutex_unlock(&trace_types_lock);
3496
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003497 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003498
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003499 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003500 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003501 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003502 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003503release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003504 seq_release_private(inode, file);
3505 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003506}
3507
3508int tracing_open_generic(struct inode *inode, struct file *filp)
3509{
Steven Rostedt60a11772008-05-12 21:20:44 +02003510 if (tracing_disabled)
3511 return -ENODEV;
3512
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003513 filp->private_data = inode->i_private;
3514 return 0;
3515}
3516
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003517bool tracing_is_disabled(void)
3518{
3519	return tracing_disabled;
3520}
3521
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003522/*
3523 * Open and update trace_array ref count.
3524 * Must have the current trace_array passed to it.
3525 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003526static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003527{
3528 struct trace_array *tr = inode->i_private;
3529
3530 if (tracing_disabled)
3531 return -ENODEV;
3532
3533 if (trace_array_get(tr) < 0)
3534 return -ENODEV;
3535
3536 filp->private_data = inode->i_private;
3537
3538 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003539}
3540
Hannes Eder4fd27352009-02-10 19:44:12 +01003541static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003543 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003544 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003545 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003546 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003547
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003548 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003549 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003550 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003551 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003552
Oleg Nesterov6484c712013-07-23 17:26:10 +02003553 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003554 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003555 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003556
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003557 for_each_tracing_cpu(cpu) {
3558 if (iter->buffer_iter[cpu])
3559 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3560 }
3561
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003562 if (iter->trace && iter->trace->close)
3563 iter->trace->close(iter);
3564
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003565 if (!iter->snapshot)
3566 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003567 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003568
3569 __trace_array_put(tr);
3570
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003571 mutex_unlock(&trace_types_lock);
3572
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003573 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003574 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003575 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003576 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003577 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003578
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003579 return 0;
3580}
3581
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003582static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3583{
3584 struct trace_array *tr = inode->i_private;
3585
3586 trace_array_put(tr);
3587 return 0;
3588}
3589
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003590static int tracing_single_release_tr(struct inode *inode, struct file *file)
3591{
3592 struct trace_array *tr = inode->i_private;
3593
3594 trace_array_put(tr);
3595
3596 return single_release(inode, file);
3597}
3598
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003599static int tracing_open(struct inode *inode, struct file *file)
3600{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003601 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003602 struct trace_iterator *iter;
3603 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003604
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003605 if (trace_array_get(tr) < 0)
3606 return -ENODEV;
3607
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003608 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003609 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3610 int cpu = tracing_get_cpu(inode);
3611
3612 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003613 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003614 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003615 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003616 }
3617
3618 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003619 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003620 if (IS_ERR(iter))
3621 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003622 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003623 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3624 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003625
3626 if (ret < 0)
3627 trace_array_put(tr);
3628
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003629 return ret;
3630}
3631
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003632/*
3633 * Some tracers are not suitable for instance buffers.
3634 * A tracer is always available for the global array (toplevel)
3635 * or if it explicitly states that it is.
3636 */
3637static bool
3638trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3639{
3640 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3641}
3642
3643/* Find the next tracer that this trace array may use */
3644static struct tracer *
3645get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3646{
3647 while (t && !trace_ok_for_array(t, tr))
3648 t = t->next;
3649
3650 return t;
3651}
3652
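/* seq_file iterator over the registered tracers (available_tracers). */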
Ingo Molnare309b412008-05-12 21:20:51 +02003653static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003654t_next(struct seq_file *m, void *v, loff_t *pos)
3655{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003656 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003657 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003658
3659 (*pos)++;
3660
3661 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003662 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003663
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003664 return t;
3665}
3666
3667static void *t_start(struct seq_file *m, loff_t *pos)
3668{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003669 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003670 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003671 loff_t l = 0;
3672
3673 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003674
3675 t = get_tracer_for_array(tr, trace_types);
3676 for (; t && l < *pos; t = t_next(m, t, &l))
3677 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003678
3679 return t;
3680}
3681
3682static void t_stop(struct seq_file *m, void *p)
3683{
3684 mutex_unlock(&trace_types_lock);
3685}
3686
3687static int t_show(struct seq_file *m, void *v)
3688{
3689 struct tracer *t = v;
3690
3691 if (!t)
3692 return 0;
3693
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003694 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003695 if (t->next)
3696 seq_putc(m, ' ');
3697 else
3698 seq_putc(m, '\n');
3699
3700 return 0;
3701}
3702
James Morris88e9d342009-09-22 16:43:43 -07003703static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003704 .start = t_start,
3705 .next = t_next,
3706 .stop = t_stop,
3707 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003708};
3709
3710static int show_traces_open(struct inode *inode, struct file *file)
3711{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003712 struct trace_array *tr = inode->i_private;
3713 struct seq_file *m;
3714 int ret;
3715
Steven Rostedt60a11772008-05-12 21:20:44 +02003716 if (tracing_disabled)
3717 return -ENODEV;
3718
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003719 ret = seq_open(file, &show_traces_seq_ops);
3720 if (ret)
3721 return ret;
3722
3723 m = file->private_data;
3724 m->private = tr;
3725
3726 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003727}
3728
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003729static ssize_t
3730tracing_write_stub(struct file *filp, const char __user *ubuf,
3731 size_t count, loff_t *ppos)
3732{
3733 return count;
3734}
3735
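/*
 * Common llseek for the tracing files: only descriptors opened for
 * read carry seq_file state worth seeking; write-only opens simply
 * reset the file position to zero.
 */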
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003736loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003737{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003738 int ret;
3739
Slava Pestov364829b2010-11-24 15:13:16 -08003740 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003741 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003742 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003743 file->f_pos = ret = 0;
3744
3745 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003746}
3747
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003748static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003749 .open = tracing_open,
3750 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003751 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003752 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003753 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003754};
3755
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003756static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003757 .open = show_traces_open,
3758 .read = seq_read,
3759 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003760 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003761};
3762
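/*
 * Usage sketch (assuming the usual tracefs mount at
 * /sys/kernel/debug/tracing): the seq_file operations above back the
 * "available_tracers" file, listing every tracer this array may use
 * on a single space-separated line:
 *
 *   # cat available_tracers
 *   blk function_graph function nop
 *
 * The exact set depends on the kernel configuration.
 */
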
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

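/*
 * Usage sketch (illustrative values): the mask is parsed and printed
 * as a hex cpumask, so limiting tracing to CPUs 0 and 1 looks like:
 *
 *   # echo 3 > tracing_cpumask
 *   # cat tracing_cpumask
 *   3
 *
 * The printed form may include leading zeros depending on NR_CPUS.
 */
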
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

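/*
 * Usage sketch: trace_set_options() backs the "trace_options" file.
 * A "no" prefix clears the named flag, and anything unrecognized is
 * tried against the current tracer's private options:
 *
 *   # echo print-parent > trace_options     - set TRACE_ITER_PRINT_PARENT
 *   # echo noprint-parent > trace_options   - clear it
 */
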
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

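/*
 * Example (kernel command line): trace_boot_options_buf is filled from
 * the "trace_options=" boot parameter, so booting with
 *
 *   trace_options=stacktrace,noprint-parent
 *
 * applies both options to the global trace array before user space
 * comes up.
 */
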
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file write into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do_trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	" hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t Format: hist:keys=<field1[,field2,...]>\n"
	"\t [:values=<field1[,field2,...]>]\n"
	"\t [:sort=<field1[,field2,...]>]\n"
	"\t [:size=#entries]\n"
	"\t [:pause][:continue][:clear]\n"
	"\t [:name=histname1]\n"
	"\t [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex display a number as a hex value\n"
	"\t .sym display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname display a common_pid as a program name\n"
	"\t .syscall display a syscall id as a syscall name\n"
	"\t .log2 display log2 value rather than raw number\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n"
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

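/*
 * Usage sketch (pids and comms are illustrative): reading
 * "saved_cmdlines" walks the pid->comm cache via the seq_file
 * operations above:
 *
 *   # cat saved_cmdlines
 *   1234 bash
 *   5678 kworker/0:1
 */
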
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

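/*
 * Usage sketch: "saved_cmdlines_size" reports the current capacity of
 * the pid->comm cache and accepts a new one; the resize swaps in a
 * fresh buffer, dropping the previously cached entries:
 *
 *   # cat saved_cmdlines_size
 *   128
 *   # echo 1024 > saved_cmdlines_size
 */
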
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}

static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

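/*
 * Layout sketch of one block built above, for a module providing N
 * maps (each allocation holds N + 2 items; the tail's next pointer
 * chains blocks from different modules together):
 *
 *   [head: mod, length=N][map 0][map 1] ... [map N-1][tail: next -> next block]
 */
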
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */

static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}

static ssize_t
tracing_saved_tgids_read(struct file *file, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
		int tgid;
		int r;

		pid = savedcmd->map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		tgid = trace_find_tgid(pid);
		r = sprintf(buf, "%d %d\n", pid, tgid);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);

	return len;
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_tgids_read,
	.llseek		= generic_file_llseek,
};

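/*
 * Usage sketch (values illustrative): "saved_tgids" pairs each cached
 * pid with its thread group id, one "<pid> <tgid>" pair per line:
 *
 *   # cat saved_tgids
 *   1234 1234
 *   5679 5678
 */
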
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

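/*
 * Usage sketch: this resize path serves both "buffer_size_kb" (all
 * CPUs, cpu_id == RING_BUFFER_ALL_CPUS) and the per-cpu files:
 *
 *   # echo 4096 > buffer_size_kb                 - 4 MB on every cpu
 *   # echo 1408 > per_cpu/cpu0/buffer_size_kb    - cpu0 only
 */
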

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, they need to grow to
 * their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

4852static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4853{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004854 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004855#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004856 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004857#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004858 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004859
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004860 mutex_lock(&trace_types_lock);
4861
Steven Rostedt73c51622009-03-11 13:42:01 -04004862 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004863 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004864 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004865 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004866 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004867 ret = 0;
4868 }
4869
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004870 for (t = trace_types; t; t = t->next) {
4871 if (strcmp(t->name, buf) == 0)
4872 break;
4873 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004874 if (!t) {
4875 ret = -EINVAL;
4876 goto out;
4877 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004878 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004879 goto out;
4880
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004881 /* Some tracers are only allowed for the top level buffer */
4882 if (!trace_ok_for_array(t, tr)) {
4883 ret = -EINVAL;
4884 goto out;
4885 }
4886
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004887 /* If trace pipe files are being read, we can't change the tracer */
4888 if (tr->current_trace->ref) {
4889 ret = -EBUSY;
4890 goto out;
4891 }
4892
Steven Rostedt9f029e82008-11-12 15:24:24 -05004893 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004894
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004895 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004896
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004897 if (tr->current_trace->reset)
4898 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004899
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004900 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004901 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004902
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004903#ifdef CONFIG_TRACER_MAX_TRACE
4904 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004905
4906 if (had_max_tr && !t->use_max_tr) {
4907 /*
4908 * We need to make sure that the update_max_tr sees that
4909 * current_trace changed to nop_trace to keep it from
4910 * swapping the buffers after we resize it.
4911 * The update_max_tr is called from interrupts disabled
4912 * so a synchronized_sched() is sufficient.
4913 */
4914 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004915 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004916 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004917#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004918
4919#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004920 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004921 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004922 if (ret < 0)
4923 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004924 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004925#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004926
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004927 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004928 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004929 if (ret)
4930 goto out;
4931 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004932
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004933 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004934 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004935 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004936 out:
4937 mutex_unlock(&trace_types_lock);
4938
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004939 return ret;
4940}
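/*
 * For context: a tracer is only selectable above once it sits on the
 * trace_types list. A minimal registration sketch follows; the
 * "mytracer" names are hypothetical, only struct tracer and
 * register_tracer() come from this file:
 *
 *	static int mytracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void mytracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *		.reset	= mytracer_reset,
 *	};
 *
 * A register_tracer(&mytracer) call from an __init function is what
 * makes "echo mytracer > current_tracer" reach the code above.
 */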
4941
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004942static ssize_t
4943tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4944 size_t cnt, loff_t *ppos)
4945{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004946 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004947 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004948 int i;
4949 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004950 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004951
Steven Rostedt60063a62008-10-28 10:44:24 -04004952 ret = cnt;
4953
Li Zefanee6c2c12009-09-18 14:06:47 +08004954 if (cnt > MAX_TRACER_SIZE)
4955 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004956
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004957 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004958 return -EFAULT;
4959
4960 buf[cnt] = 0;
4961
4962 /* strip ending whitespace. */
4963 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4964 buf[i] = 0;
4965
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004966 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004967 if (err)
4968 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004969
Jiri Olsacf8517c2009-10-23 19:36:16 -04004970 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004971
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004972 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004973}
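/*
 * A hedged userspace sketch of driving the write handler above; the
 * path assumes tracefs is mounted in the usual debugfs location:
 *
 *	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "nop", 3);
 *		close(fd);
 *	}
 *
 * The handler strips trailing whitespace, so "echo nop" (which adds
 * a newline) behaves the same way.
 */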
4974
4975static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004976tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4977 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004978{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004979 char buf[64];
4980 int r;
4981
Steven Rostedtcffae432008-05-12 21:21:00 +02004982 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004983 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004984 if (r > sizeof(buf))
4985 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004986 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004987}
4988
4989static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004990tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4991 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004992{
Hannes Eder5e398412009-02-10 19:44:34 +01004993 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004994 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004995
Peter Huewe22fe9b52011-06-07 21:58:27 +02004996 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4997 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004998 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004999
5000 *ptr = val * 1000;
5001
5002 return cnt;
5003}
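/*
 * Worked example for the helper above: the user-visible latency files
 * are in microseconds, so writing the string "100" stores
 * 100 * 1000 = 100000 in *ptr. The value is kept internally in
 * nanoseconds and converted back by nsecs_to_usecs() on the read side.
 */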
5004
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005005static ssize_t
5006tracing_thresh_read(struct file *filp, char __user *ubuf,
5007 size_t cnt, loff_t *ppos)
5008{
5009 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5010}
5011
5012static ssize_t
5013tracing_thresh_write(struct file *filp, const char __user *ubuf,
5014 size_t cnt, loff_t *ppos)
5015{
5016 struct trace_array *tr = filp->private_data;
5017 int ret;
5018
5019 mutex_lock(&trace_types_lock);
5020 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5021 if (ret < 0)
5022 goto out;
5023
5024 if (tr->current_trace->update_thresh) {
5025 ret = tr->current_trace->update_thresh(tr);
5026 if (ret < 0)
5027 goto out;
5028 }
5029
5030 ret = cnt;
5031out:
5032 mutex_unlock(&trace_types_lock);
5033
5034 return ret;
5035}
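/*
 * Usage sketch (assuming a latency tracer such as irqsoff is active):
 * writing "400" here makes the tracer record critical sections longer
 * than 400 usecs rather than only new maxima, and the optional
 * update_thresh() callback above lets the current tracer react
 * immediately:
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_thresh", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "400", 3);
 *		close(fd);
 *	}
 */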
5036
Chen Gange428abb2015-11-10 05:15:15 +08005037#ifdef CONFIG_TRACER_MAX_TRACE
5038
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005039static ssize_t
5040tracing_max_lat_read(struct file *filp, char __user *ubuf,
5041 size_t cnt, loff_t *ppos)
5042{
5043 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5044}
5045
5046static ssize_t
5047tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5048 size_t cnt, loff_t *ppos)
5049{
5050 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5051}
5052
Chen Gange428abb2015-11-10 05:15:15 +08005053#endif
5054
Steven Rostedtb3806b42008-05-12 21:20:46 +02005055static int tracing_open_pipe(struct inode *inode, struct file *filp)
5056{
Oleg Nesterov15544202013-07-23 17:25:57 +02005057 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005058 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005059 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005060
5061 if (tracing_disabled)
5062 return -ENODEV;
5063
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005064 if (trace_array_get(tr) < 0)
5065 return -ENODEV;
5066
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005067 mutex_lock(&trace_types_lock);
5068
Steven Rostedtb3806b42008-05-12 21:20:46 +02005069 /* create a buffer to store the information to pass to userspace */
5070 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005071 if (!iter) {
5072 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005073 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005074 goto out;
5075 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005076
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005077 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005078 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005079
5080 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5081 ret = -ENOMEM;
5082 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305083 }
5084
Steven Rostedta3097202008-11-07 22:36:02 -05005085	/* trace pipe does not show the start of the buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305086 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005087
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005088 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005089 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5090
David Sharp8be07092012-11-13 12:18:22 -08005091 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005092 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005093 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5094
Oleg Nesterov15544202013-07-23 17:25:57 +02005095 iter->tr = tr;
5096 iter->trace_buffer = &tr->trace_buffer;
5097 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005098 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005099 filp->private_data = iter;
5100
Steven Rostedt107bad82008-05-12 21:21:01 +02005101 if (iter->trace->pipe_open)
5102 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005103
Arnd Bergmannb4447862010-07-07 23:40:11 +02005104 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005105
5106 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005107out:
5108 mutex_unlock(&trace_types_lock);
5109 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005110
5111fail:
5112 kfree(iter->trace);
5113 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005114 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005115 mutex_unlock(&trace_types_lock);
5116 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005117}
5118
5119static int tracing_release_pipe(struct inode *inode, struct file *file)
5120{
5121 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005122 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005123
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005124 mutex_lock(&trace_types_lock);
5125
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005126 tr->current_trace->ref--;
5127
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005128 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005129 iter->trace->pipe_close(iter);
5130
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005131 mutex_unlock(&trace_types_lock);
5132
Rusty Russell44623442009-01-01 10:12:23 +10305133 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005134 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005135 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005136
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005137 trace_array_put(tr);
5138
Steven Rostedtb3806b42008-05-12 21:20:46 +02005139 return 0;
5140}
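/*
 * Note the pairing: the current_trace->ref taken in tracing_open_pipe()
 * and dropped here is exactly what makes tracing_set_tracer() return
 * -EBUSY while any trace_pipe reader is still open.
 */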
5141
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005142static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005143trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005144{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005145 struct trace_array *tr = iter->tr;
5146
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005147 /* Iterators are static, they should be filled or empty */
5148 if (trace_buffer_iter(iter, iter->cpu_file))
5149 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005150
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005151 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005152 /*
5153 * Always select as readable when in blocking mode
5154 */
5155 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005156 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005157 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005158 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005159}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005160
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005161static unsigned int
5162tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5163{
5164 struct trace_iterator *iter = filp->private_data;
5165
5166 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005167}
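/*
 * A minimal userspace sketch of the poll path above; a plain blocking
 * read() behaves the same, this only shows how the wakeup is consumed
 * (fd is an open trace_pipe descriptor, error handling omitted):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *	}
 */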
5168
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005169/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005170static int tracing_wait_pipe(struct file *filp)
5171{
5172 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005173 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005174
5175 while (trace_empty(iter)) {
5176
5177 if ((filp->f_flags & O_NONBLOCK)) {
5178 return -EAGAIN;
5179 }
5180
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005181 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005182	 * We block until we have read something and tracing is stopped.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005183	 * Even while tracing is stopped, we still block if we have never
5184	 * read anything. This allows a user to cat this file, and
5185	 * then enable tracing. But after we have read something,
5186	 * we return EOF once tracing is stopped again.
5187 *
5188 * iter->pos will be 0 if we haven't read anything.
5189 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005190 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005191 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005192
5193 mutex_unlock(&iter->mutex);
5194
Rabin Vincente30f53a2014-11-10 19:46:34 +01005195 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005196
5197 mutex_lock(&iter->mutex);
5198
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005199 if (ret)
5200 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005201 }
5202
5203 return 1;
5204}
5205
Steven Rostedtb3806b42008-05-12 21:20:46 +02005206/*
5207 * Consumer reader.
5208 */
5209static ssize_t
5210tracing_read_pipe(struct file *filp, char __user *ubuf,
5211 size_t cnt, loff_t *ppos)
5212{
5213 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005214 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005215
5216 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005217 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5218 if (sret != -EBUSY)
5219 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005220
Steven Rostedtf9520752009-03-02 14:04:40 -05005221 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005222
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005223 /*
5224 * Avoid more than one consumer on a single file descriptor
5225	 * This is just a matter of trace coherency; the ring buffer itself
5226	 * is protected.
5227 */
5228 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005229 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005230 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5231 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005232 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005233 }
5234
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005235waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005236 sret = tracing_wait_pipe(filp);
5237 if (sret <= 0)
5238 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005239
5240 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005241 if (trace_empty(iter)) {
5242 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005243 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005244 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005245
5246 if (cnt >= PAGE_SIZE)
5247 cnt = PAGE_SIZE - 1;
5248
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005249 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005250 memset(&iter->seq, 0,
5251 sizeof(struct trace_iterator) -
5252 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005253 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005254 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005255
Lai Jiangshan4f535962009-05-18 19:35:34 +08005256 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005257 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005258 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005259 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005260 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005261
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005262 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005263 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005264 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005265 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005266 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005267 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005268 if (ret != TRACE_TYPE_NO_CONSUME)
5269 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005270
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005271 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005272 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005273
5274 /*
5275	 * Setting the full flag means we reached the trace_seq buffer
5276	 * size and should have left via the partial-line condition above.
5277	 * If we get here, one of the trace_seq_* functions is being misused.
5278 */
5279 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5280 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005281 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005282 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005283 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005284
Steven Rostedtb3806b42008-05-12 21:20:46 +02005285 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005286 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005287 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005288 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005289
5290 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005291	 * If there was nothing to send to the user, despite having consumed
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005292	 * trace entries, go back and wait for more.
5293 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005294 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005295 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005296
Steven Rostedt107bad82008-05-12 21:21:01 +02005297out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005298 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005299
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005300 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005301}
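/*
 * Note that the read above is destructive: trace_consume() removes
 * each event from the ring buffer as it is printed, so data returned
 * by trace_pipe never shows up again in later reads or in the
 * "trace" file.
 */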
5302
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005303static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5304 unsigned int idx)
5305{
5306 __free_page(spd->pages[idx]);
5307}
5308
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005309static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005310 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005311 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005312 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005313 .steal = generic_pipe_buf_steal,
5314 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005315};
5316
Steven Rostedt34cd4992009-02-09 12:06:29 -05005317static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005318tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005319{
5320 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005321 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005322 int ret;
5323
5324 /* Seq buffer is page-sized, exactly what we need. */
5325 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005326 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005327 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005328
5329 if (trace_seq_has_overflowed(&iter->seq)) {
5330 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005331 break;
5332 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005333
5334 /*
5335	 * This should not be hit, because a partial line should
5336	 * only be returned if iter->seq overflowed. But check it
5337	 * anyway to be safe.
5338 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005339 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005340 iter->seq.seq.len = save_len;
5341 break;
5342 }
5343
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005344 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005345 if (rem < count) {
5346 rem = 0;
5347 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005348 break;
5349 }
5350
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005351 if (ret != TRACE_TYPE_NO_CONSUME)
5352 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005353 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005354 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005355 rem = 0;
5356 iter->ent = NULL;
5357 break;
5358 }
5359 }
5360
5361 return rem;
5362}
5363
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005364static ssize_t tracing_splice_read_pipe(struct file *filp,
5365 loff_t *ppos,
5366 struct pipe_inode_info *pipe,
5367 size_t len,
5368 unsigned int flags)
5369{
Jens Axboe35f3d142010-05-20 10:43:18 +02005370 struct page *pages_def[PIPE_DEF_BUFFERS];
5371 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005372 struct trace_iterator *iter = filp->private_data;
5373 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005374 .pages = pages_def,
5375 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005376 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005377 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005378 .flags = flags,
5379 .ops = &tracing_pipe_buf_ops,
5380 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005381 };
5382 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005383 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005384 unsigned int i;
5385
Jens Axboe35f3d142010-05-20 10:43:18 +02005386 if (splice_grow_spd(pipe, &spd))
5387 return -ENOMEM;
5388
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005389 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005390
5391 if (iter->trace->splice_read) {
5392 ret = iter->trace->splice_read(iter, filp,
5393 ppos, pipe, len, flags);
5394 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005395 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005396 }
5397
5398 ret = tracing_wait_pipe(filp);
5399 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005400 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005401
Jason Wessel955b61e2010-08-05 09:22:23 -05005402 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005403 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005404 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005405 }
5406
Lai Jiangshan4f535962009-05-18 19:35:34 +08005407 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005408 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005409
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005410 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005411 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005412 spd.pages[i] = alloc_page(GFP_KERNEL);
5413 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005414 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005415
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005416 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005417
5418 /* Copy the data into the page, so we can start over. */
5419 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005420 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005421 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005422 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005423 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005424 break;
5425 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005426 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005427 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005428
Steven Rostedtf9520752009-03-02 14:04:40 -05005429 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005430 }
5431
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005432 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005433 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005434 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005435
5436 spd.nr_pages = i;
5437
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005438 if (i)
5439 ret = splice_to_pipe(pipe, &spd);
5440 else
5441 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005442out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005443 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005444 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005445
Steven Rostedt34cd4992009-02-09 12:06:29 -05005446out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005447 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005448 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005449}
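/*
 * A hedged userspace sketch of the splice path above, moving trace
 * data into a file without copying through a user buffer (out_fd is
 * an assumed destination descriptor, error handling omitted):
 *
 *	int in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	int p[2];
 *	ssize_t n;
 *
 *	pipe(p);
 *	n = splice(in, NULL, p[1], NULL, 4096, 0);
 *	if (n > 0)
 *		splice(p[0], NULL, out_fd, NULL, n, 0);
 */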
5450
Steven Rostedta98a3c32008-05-12 21:20:59 +02005451static ssize_t
5452tracing_entries_read(struct file *filp, char __user *ubuf,
5453 size_t cnt, loff_t *ppos)
5454{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005455 struct inode *inode = file_inode(filp);
5456 struct trace_array *tr = inode->i_private;
5457 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005458 char buf[64];
5459 int r = 0;
5460 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005461
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005462 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005463
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005464 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005465 int cpu, buf_size_same;
5466 unsigned long size;
5467
5468 size = 0;
5469 buf_size_same = 1;
5470	 /* check if all cpu sizes are the same */
5471 for_each_tracing_cpu(cpu) {
5472	 /* fill in the size from the first enabled cpu */
5473 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005474 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5475 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005476 buf_size_same = 0;
5477 break;
5478 }
5479 }
5480
5481 if (buf_size_same) {
5482 if (!ring_buffer_expanded)
5483 r = sprintf(buf, "%lu (expanded: %lu)\n",
5484 size >> 10,
5485 trace_buf_size >> 10);
5486 else
5487 r = sprintf(buf, "%lu\n", size >> 10);
5488 } else
5489 r = sprintf(buf, "X\n");
5490 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005491 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005492
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005493 mutex_unlock(&trace_types_lock);
5494
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005495 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5496 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005497}
5498
5499static ssize_t
5500tracing_entries_write(struct file *filp, const char __user *ubuf,
5501 size_t cnt, loff_t *ppos)
5502{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005503 struct inode *inode = file_inode(filp);
5504 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005505 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005506 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005507
Peter Huewe22fe9b52011-06-07 21:58:27 +02005508 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5509 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005510 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005511
5512 /* must have at least 1 entry */
5513 if (!val)
5514 return -EINVAL;
5515
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005516 /* value is in KB */
5517 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005518 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005519 if (ret < 0)
5520 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005521
Jiri Olsacf8517c2009-10-23 19:36:16 -04005522 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005523
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005524 return cnt;
5525}
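/*
 * Usage note: this handler backs the buffer_size_kb files. Writing
 * "4096" to the top-level file resizes every per-cpu buffer to 4 MB,
 * while writing to per_cpu/cpuN/buffer_size_kb resizes only that CPU,
 * since tracing_get_cpu() recovers the CPU from the inode above.
 */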
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005526
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005527static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005528tracing_total_entries_read(struct file *filp, char __user *ubuf,
5529 size_t cnt, loff_t *ppos)
5530{
5531 struct trace_array *tr = filp->private_data;
5532 char buf[64];
5533 int r, cpu;
5534 unsigned long size = 0, expanded_size = 0;
5535
5536 mutex_lock(&trace_types_lock);
5537 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005538 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005539 if (!ring_buffer_expanded)
5540 expanded_size += trace_buf_size >> 10;
5541 }
5542 if (ring_buffer_expanded)
5543 r = sprintf(buf, "%lu\n", size);
5544 else
5545 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5546 mutex_unlock(&trace_types_lock);
5547
5548 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5549}
5550
5551static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005552tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5553 size_t cnt, loff_t *ppos)
5554{
5555 /*
5556	 * There is no need to read what the user has written; this function
5557	 * exists just so that "echo" does not report an error
5558 */
5559
5560 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005561
5562 return cnt;
5563}
5564
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005565static int
5566tracing_free_buffer_release(struct inode *inode, struct file *filp)
5567{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005568 struct trace_array *tr = inode->i_private;
5569
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005570	 /* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005571 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005572 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005573 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005574 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005575
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005576 trace_array_put(tr);
5577
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005578 return 0;
5579}
5580
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005581static ssize_t
5582tracing_mark_write(struct file *filp, const char __user *ubuf,
5583 size_t cnt, loff_t *fpos)
5584{
Steven Rostedtd696b582011-09-22 11:50:27 -04005585 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005586 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005587 struct ring_buffer_event *event;
5588 struct ring_buffer *buffer;
5589 struct print_entry *entry;
5590 unsigned long irq_flags;
5591 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005592 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005593 int nr_pages = 1;
5594 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005595 int offset;
5596 int size;
5597 int len;
5598 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005599 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005600
Steven Rostedtc76f0692008-11-07 22:36:02 -05005601 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005602 return -EINVAL;
5603
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005604 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005605 return -EINVAL;
5606
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005607 if (cnt > TRACE_BUF_SIZE)
5608 cnt = TRACE_BUF_SIZE;
5609
Steven Rostedtd696b582011-09-22 11:50:27 -04005610 /*
5611 * Userspace is injecting traces into the kernel trace buffer.
5612	 * We want to be as non-intrusive as possible.
5613 * To do so, we do not want to allocate any special buffers
5614 * or take any locks, but instead write the userspace data
5615 * straight into the ring buffer.
5616 *
5617 * First we need to pin the userspace buffer into memory,
5618	 * which it most likely already is, because the caller just
5619	 * referenced it. But there is no guarantee. By using get_user_pages_fast()
5620 * and kmap_atomic/kunmap_atomic() we can get access to the
5621 * pages directly. We then write the data directly into the
5622 * ring buffer.
5623 */
5624 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005625
Steven Rostedtd696b582011-09-22 11:50:27 -04005626 /* check if we cross pages */
5627 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5628 nr_pages = 2;
5629
5630 offset = addr & (PAGE_SIZE - 1);
5631 addr &= PAGE_MASK;
5632
5633 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5634 if (ret < nr_pages) {
5635 while (--ret >= 0)
5636 put_page(pages[ret]);
5637 written = -EFAULT;
5638 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005639 }
5640
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005641 for (i = 0; i < nr_pages; i++)
5642 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005643
5644 local_save_flags(irq_flags);
5645 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005646 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005647 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5648 irq_flags, preempt_count());
5649 if (!event) {
5650 /* Ring buffer disabled, return as if not open for write */
5651 written = -EBADF;
5652 goto out_unlock;
5653 }
5654
5655 entry = ring_buffer_event_data(event);
5656 entry->ip = _THIS_IP_;
5657
5658 if (nr_pages == 2) {
5659 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005660 memcpy(&entry->buf, map_page[0] + offset, len);
5661 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005662 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005663 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005664
5665 if (entry->buf[cnt - 1] != '\n') {
5666 entry->buf[cnt] = '\n';
5667 entry->buf[cnt + 1] = '\0';
5668 } else
5669 entry->buf[cnt] = '\0';
5670
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005671 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005672
5673 written = cnt;
5674
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005675 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005676
Steven Rostedtd696b582011-09-22 11:50:27 -04005677 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005678 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005679 kunmap_atomic(map_page[i]);
5680 put_page(pages[i]);
5681 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005682 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005683 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005684}
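/*
 * A minimal userspace sketch for the trace_marker interface handled
 * above; the kernel appends the '\n' itself if the write lacks one:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "hello from userspace", 20);
 *		close(fd);
 *	}
 */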
5685
Li Zefan13f16d22009-12-08 11:16:11 +08005686static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005687{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005688 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005689 int i;
5690
5691 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005692 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005693 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005694 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5695 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005696 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005697
Li Zefan13f16d22009-12-08 11:16:11 +08005698 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005699}
5700
Steven Rostedte1e232c2014-02-10 23:38:46 -05005701static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005702{
Zhaolei5079f322009-08-25 16:12:56 +08005703 int i;
5704
Zhaolei5079f322009-08-25 16:12:56 +08005705 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5706 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5707 break;
5708 }
5709 if (i == ARRAY_SIZE(trace_clocks))
5710 return -EINVAL;
5711
Zhaolei5079f322009-08-25 16:12:56 +08005712 mutex_lock(&trace_types_lock);
5713
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005714 tr->clock_id = i;
5715
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005716 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005717
David Sharp60303ed2012-10-11 16:27:52 -07005718 /*
5719 * New clock may not be consistent with the previous clock.
5720 * Reset the buffer so that it doesn't have incomparable timestamps.
5721 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005722 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005723
5724#ifdef CONFIG_TRACER_MAX_TRACE
5725 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5726 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005727 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005728#endif
David Sharp60303ed2012-10-11 16:27:52 -07005729
Zhaolei5079f322009-08-25 16:12:56 +08005730 mutex_unlock(&trace_types_lock);
5731
Steven Rostedte1e232c2014-02-10 23:38:46 -05005732 return 0;
5733}
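/*
 * Usage sketch: reading trace_clock lists the available clocks with
 * the current one in brackets, e.g. "[local] global counter ...", and
 * writing one of those names ("echo global > trace_clock") switches
 * the timestamp source via the function above. The buffers are reset
 * because timestamps from different clocks are not comparable.
 */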
5734
5735static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5736 size_t cnt, loff_t *fpos)
5737{
5738 struct seq_file *m = filp->private_data;
5739 struct trace_array *tr = m->private;
5740 char buf[64];
5741 const char *clockstr;
5742 int ret;
5743
5744 if (cnt >= sizeof(buf))
5745 return -EINVAL;
5746
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005747 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05005748 return -EFAULT;
5749
5750 buf[cnt] = 0;
5751
5752 clockstr = strstrip(buf);
5753
5754 ret = tracing_set_clock(tr, clockstr);
5755 if (ret)
5756 return ret;
5757
Zhaolei5079f322009-08-25 16:12:56 +08005758 *fpos += cnt;
5759
5760 return cnt;
5761}
5762
Li Zefan13f16d22009-12-08 11:16:11 +08005763static int tracing_clock_open(struct inode *inode, struct file *file)
5764{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005765 struct trace_array *tr = inode->i_private;
5766 int ret;
5767
Li Zefan13f16d22009-12-08 11:16:11 +08005768 if (tracing_disabled)
5769 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005770
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005771 if (trace_array_get(tr))
5772 return -ENODEV;
5773
5774 ret = single_open(file, tracing_clock_show, inode->i_private);
5775 if (ret < 0)
5776 trace_array_put(tr);
5777
5778 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005779}
5780
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005781struct ftrace_buffer_info {
5782 struct trace_iterator iter;
5783 void *spare;
5784 unsigned int read;
5785};
5786
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005787#ifdef CONFIG_TRACER_SNAPSHOT
5788static int tracing_snapshot_open(struct inode *inode, struct file *file)
5789{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005790 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005791 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005792 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005793 int ret = 0;
5794
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005795 if (trace_array_get(tr) < 0)
5796 return -ENODEV;
5797
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005798 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005799 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005800 if (IS_ERR(iter))
5801 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005802 } else {
5803 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005804 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005805 m = kzalloc(sizeof(*m), GFP_KERNEL);
5806 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005807 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005808 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5809 if (!iter) {
5810 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005811 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005812 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005813 ret = 0;
5814
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005815 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005816 iter->trace_buffer = &tr->max_buffer;
5817 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 m->private = iter;
5819 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005820 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005821out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005822 if (ret < 0)
5823 trace_array_put(tr);
5824
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005825 return ret;
5826}
5827
5828static ssize_t
5829tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5830 loff_t *ppos)
5831{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005832 struct seq_file *m = filp->private_data;
5833 struct trace_iterator *iter = m->private;
5834 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005835 unsigned long val;
5836 int ret;
5837
5838 ret = tracing_update_buffers();
5839 if (ret < 0)
5840 return ret;
5841
5842 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5843 if (ret)
5844 return ret;
5845
5846 mutex_lock(&trace_types_lock);
5847
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005848 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005849 ret = -EBUSY;
5850 goto out;
5851 }
5852
5853 switch (val) {
5854 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005855 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5856 ret = -EINVAL;
5857 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005858 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005859 if (tr->allocated_snapshot)
5860 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005861 break;
5862 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005863/* Only allow per-cpu swap if the ring buffer supports it */
5864#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5865 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5866 ret = -EINVAL;
5867 break;
5868 }
5869#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005870 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005871 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005872 if (ret < 0)
5873 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005874 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005875 local_irq_disable();
5876 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005877 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005878 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005879 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005880 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005881 local_irq_enable();
5882 break;
5883 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005884 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005885 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5886 tracing_reset_online_cpus(&tr->max_buffer);
5887 else
5888 tracing_reset(&tr->max_buffer, iter->cpu_file);
5889 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005890 break;
5891 }
5892
5893 if (ret >= 0) {
5894 *ppos += cnt;
5895 ret = cnt;
5896 }
5897out:
5898 mutex_unlock(&trace_types_lock);
5899 return ret;
5900}
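/*
 * Sketch of the snapshot file protocol implemented above (requires
 * CONFIG_TRACER_SNAPSHOT): writing "1" allocates the spare buffer if
 * needed and swaps it with the live one, "0" frees the spare, and any
 * other value just clears the snapshot contents. Reading the snapshot
 * file then shows the frozen copy while tracing continues in the
 * live buffer.
 */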
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005901
5902static int tracing_snapshot_release(struct inode *inode, struct file *file)
5903{
5904 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005905 int ret;
5906
5907 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005908
5909 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005910 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005911
5912 /* If write only, the seq_file is just a stub */
5913 if (m)
5914 kfree(m->private);
5915 kfree(m);
5916
5917 return 0;
5918}
5919
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005920static int tracing_buffers_open(struct inode *inode, struct file *filp);
5921static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5922 size_t count, loff_t *ppos);
5923static int tracing_buffers_release(struct inode *inode, struct file *file);
5924static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5925 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5926
5927static int snapshot_raw_open(struct inode *inode, struct file *filp)
5928{
5929 struct ftrace_buffer_info *info;
5930 int ret;
5931
5932 ret = tracing_buffers_open(inode, filp);
5933 if (ret < 0)
5934 return ret;
5935
5936 info = filp->private_data;
5937
5938 if (info->iter.trace->use_max_tr) {
5939 tracing_buffers_release(inode, filp);
5940 return -EBUSY;
5941 }
5942
5943 info->iter.snapshot = true;
5944 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5945
5946 return ret;
5947}
5948
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005949#endif /* CONFIG_TRACER_SNAPSHOT */
5950
5951
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005952static const struct file_operations tracing_thresh_fops = {
5953 .open = tracing_open_generic,
5954 .read = tracing_thresh_read,
5955 .write = tracing_thresh_write,
5956 .llseek = generic_file_llseek,
5957};
5958
Chen Gange428abb2015-11-10 05:15:15 +08005959#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005960static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005961 .open = tracing_open_generic,
5962 .read = tracing_max_lat_read,
5963 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005964 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005965};
Chen Gange428abb2015-11-10 05:15:15 +08005966#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005967
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005968static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005969 .open = tracing_open_generic,
5970 .read = tracing_set_trace_read,
5971 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005972 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005973};
5974
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005975static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005976 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005977 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005978 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005979 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005980 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005981 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005982};
5983
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005984static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005985 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005986 .read = tracing_entries_read,
5987 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005988 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005989 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005990};
5991
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005992static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005993 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005994 .read = tracing_total_entries_read,
5995 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005996 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005997};
5998
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005999static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006000 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006001 .write = tracing_free_buffer_write,
6002 .release = tracing_free_buffer_release,
6003};
6004
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006005static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006006 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006007 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006008 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006009 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006010};
6011
Zhaolei5079f322009-08-25 16:12:56 +08006012static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006013 .open = tracing_clock_open,
6014 .read = seq_read,
6015 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006016 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006017 .write = tracing_clock_write,
6018};
6019
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006020#ifdef CONFIG_TRACER_SNAPSHOT
6021static const struct file_operations snapshot_fops = {
6022 .open = tracing_snapshot_open,
6023 .read = seq_read,
6024 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006025 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006026 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006027};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006028
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006029static const struct file_operations snapshot_raw_fops = {
6030 .open = snapshot_raw_open,
6031 .read = tracing_buffers_read,
6032 .release = tracing_buffers_release,
6033 .splice_read = tracing_buffers_splice_read,
6034 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006035};
6036
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006037#endif /* CONFIG_TRACER_SNAPSHOT */
6038
Steven Rostedt2cadf912008-12-01 22:20:19 -05006039static int tracing_buffers_open(struct inode *inode, struct file *filp)
6040{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006041 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006042 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006043 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006044
6045 if (tracing_disabled)
6046 return -ENODEV;
6047
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006048 if (trace_array_get(tr) < 0)
6049 return -ENODEV;
6050
Steven Rostedt2cadf912008-12-01 22:20:19 -05006051 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006052 if (!info) {
6053 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006054 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006055 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006056
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006057 mutex_lock(&trace_types_lock);
6058
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006059 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006060 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006061 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006062 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006063 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006064 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006065 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006066
6067 filp->private_data = info;
6068
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006069 tr->current_trace->ref++;
6070
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006071 mutex_unlock(&trace_types_lock);
6072
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006073 ret = nonseekable_open(inode, filp);
6074 if (ret < 0)
6075 trace_array_put(tr);
6076
6077 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006078}
6079
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006080static unsigned int
6081tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6082{
6083 struct ftrace_buffer_info *info = filp->private_data;
6084 struct trace_iterator *iter = &info->iter;
6085
6086 return trace_poll(iter, filp, poll_table);
6087}
6088
Steven Rostedt2cadf912008-12-01 22:20:19 -05006089static ssize_t
6090tracing_buffers_read(struct file *filp, char __user *ubuf,
6091 size_t count, loff_t *ppos)
6092{
6093 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006094 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006095 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006096 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006097
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006098 if (!count)
6099 return 0;
6100
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006101#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006102 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6103 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006104#endif
6105
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006106 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006107 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6108 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006109 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006110 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006111
Steven Rostedt2cadf912008-12-01 22:20:19 -05006112 /* Do we have previous read data to read? */
6113 if (info->read < PAGE_SIZE)
6114 goto read;
6115
Steven Rostedtb6273442013-02-28 13:44:11 -05006116 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006117 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006118 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006119 &info->spare,
6120 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006121 iter->cpu_file, 0);
6122 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006123
6124 if (ret < 0) {
6125 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006126 if ((filp->f_flags & O_NONBLOCK))
6127 return -EAGAIN;
6128
Rabin Vincente30f53a2014-11-10 19:46:34 +01006129 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006130 if (ret)
6131 return ret;
6132
Steven Rostedtb6273442013-02-28 13:44:11 -05006133 goto again;
6134 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006135 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006136 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006137
Steven Rostedt436fc282011-10-14 10:44:25 -04006138 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006139 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006140 size = PAGE_SIZE - info->read;
6141 if (size > count)
6142 size = count;
6143
6144 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006145 if (ret == size)
6146 return -EFAULT;
6147
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006148 size -= ret;
6149
Steven Rostedt2cadf912008-12-01 22:20:19 -05006150 *ppos += size;
6151 info->read += size;
6152
6153 return size;
6154}
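/*
 * A note on the read() semantics above (illustrative summary, not new
 * behavior): data is copied out of a single "spare" page at a time, so
 * a reader sees at most PAGE_SIZE bytes per call and should simply
 * read() in a loop until the call returns 0, or fails with EAGAIN on a
 * non-blocking descriptor.
 */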
6155
6156static int tracing_buffers_release(struct inode *inode, struct file *file)
6157{
6158 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006159 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006160
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006161 mutex_lock(&trace_types_lock);
6162
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006163 iter->tr->current_trace->ref--;
6164
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006165 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006166
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006167 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006168 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006169 kfree(info);
6170
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006171 mutex_unlock(&trace_types_lock);
6172
Steven Rostedt2cadf912008-12-01 22:20:19 -05006173 return 0;
6174}
6175
6176struct buffer_ref {
6177 struct ring_buffer *buffer;
6178 void *page;
6179 int ref;
6180};
6181
6182static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6183 struct pipe_buffer *buf)
6184{
6185 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6186
6187 if (--ref->ref)
6188 return;
6189
6190 ring_buffer_free_read_page(ref->buffer, ref->page);
6191 kfree(ref);
6192 buf->private = 0;
6193}
6194
Steven Rostedt2cadf912008-12-01 22:20:19 -05006195static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6196 struct pipe_buffer *buf)
6197{
6198 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6199
6200 ref->ref++;
6201}
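/*
 * Lifecycle note: each page spliced out of the ring buffer is wrapped
 * in a buffer_ref with a count of one. buffer_pipe_buf_get() bumps the
 * count for every additional pipe buffer sharing the page, and the page
 * is handed back to the ring buffer by whichever of
 * buffer_pipe_buf_release() or buffer_spd_release() (below) drops the
 * last reference.
 */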
6202
 6203/* Pipe buffer operations for ring buffer pages. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006204static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006205 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006206 .confirm = generic_pipe_buf_confirm,
6207 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006208 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006209 .get = buffer_pipe_buf_get,
6210};
6211
6212/*
 6214 * Callback from splice_to_pipe(): release any pages still held in
 6215 * the spd if we errored out while filling the pipe.
6215 */
6216static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6217{
6218 struct buffer_ref *ref =
6219 (struct buffer_ref *)spd->partial[i].private;
6220
6221 if (--ref->ref)
6222 return;
6223
6224 ring_buffer_free_read_page(ref->buffer, ref->page);
6225 kfree(ref);
6226 spd->partial[i].private = 0;
6227}
6228
6229static ssize_t
6230tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6231 struct pipe_inode_info *pipe, size_t len,
6232 unsigned int flags)
6233{
6234 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006235 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006236 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6237 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006238 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006239 .pages = pages_def,
6240 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006241 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006242 .flags = flags,
6243 .ops = &buffer_pipe_buf_ops,
6244 .spd_release = buffer_spd_release,
6245 };
6246 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04006247 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006248 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006249
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006250#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006251 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6252 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006253#endif
6254
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006255 if (splice_grow_spd(pipe, &spd))
6256 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02006257
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006258 if (*ppos & (PAGE_SIZE - 1))
6259 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006260
6261 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006262 if (len < PAGE_SIZE)
6263 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006264 len &= PAGE_MASK;
6265 }
6266
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006267 again:
6268 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006269 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006270
Al Viroa786c062014-04-11 12:01:03 -04006271 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006272 struct page *page;
6273 int r;
6274
6275 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006276 if (!ref) {
6277 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006278 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006279 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006280
Steven Rostedt7267fa62009-04-29 00:16:21 -04006281 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006282 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006283 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006284 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006285 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006286 kfree(ref);
6287 break;
6288 }
6289
6290 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006291 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006292 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07006293 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006294 kfree(ref);
6295 break;
6296 }
6297
6298 /*
 6300 * Zero out any leftover data; this page is going
 6301 * out to user space.
6301 */
6302 size = ring_buffer_page_len(ref->page);
6303 if (size < PAGE_SIZE)
6304 memset(ref->page + size, 0, PAGE_SIZE - size);
6305
6306 page = virt_to_page(ref->page);
6307
6308 spd.pages[i] = page;
6309 spd.partial[i].len = PAGE_SIZE;
6310 spd.partial[i].offset = 0;
6311 spd.partial[i].private = (unsigned long)ref;
6312 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006313 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006314
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006315 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006316 }
6317
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006318 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006319 spd.nr_pages = i;
6320
6321 /* did we read anything? */
6322 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006323 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006324 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01006325
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006326 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6327 return -EAGAIN;
6328
Rabin Vincente30f53a2014-11-10 19:46:34 +01006329 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006330 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006331 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006332
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006333 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006334 }
6335
6336 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02006337 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006338
Steven Rostedt2cadf912008-12-01 22:20:19 -05006339 return ret;
6340}
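/*
 * Illustrative user-space sketch (not part of this file): draining one
 * CPU's trace_pipe_raw through the splice path above. The tracefs mount
 * point, the 4K page size, and the minimal error handling are all
 * assumptions; headers (<fcntl.h>, <unistd.h>) are omitted.
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(fd, NULL, pfd[1], NULL, 4096,
 *			   SPLICE_F_MOVE)) > 0)
 *		splice(pfd[0], NULL, STDOUT_FILENO, NULL, n, SPLICE_F_MOVE);
 *
 * Note that *ppos must be page aligned and the length is rounded down
 * to whole pages (a length below PAGE_SIZE is rejected), as enforced at
 * the top of tracing_buffers_splice_read().
 */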
6341
6342static const struct file_operations tracing_buffers_fops = {
6343 .open = tracing_buffers_open,
6344 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006345 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006346 .release = tracing_buffers_release,
6347 .splice_read = tracing_buffers_splice_read,
6348 .llseek = no_llseek,
6349};
6350
Steven Rostedtc8d77182009-04-29 18:03:45 -04006351static ssize_t
6352tracing_stats_read(struct file *filp, char __user *ubuf,
6353 size_t count, loff_t *ppos)
6354{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006355 struct inode *inode = file_inode(filp);
6356 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006357 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006358 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006359 struct trace_seq *s;
6360 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006361 unsigned long long t;
6362 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006363
Li Zefane4f2d102009-06-15 10:57:28 +08006364 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006365 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006366 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006367
6368 trace_seq_init(s);
6369
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006370 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006371 trace_seq_printf(s, "entries: %ld\n", cnt);
6372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006373 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006374 trace_seq_printf(s, "overrun: %ld\n", cnt);
6375
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006376 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006377 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6378
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006379 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006380 trace_seq_printf(s, "bytes: %ld\n", cnt);
6381
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006382 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006383 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006384 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006385 usec_rem = do_div(t, USEC_PER_SEC);
6386 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6387 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006388
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006389 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006390 usec_rem = do_div(t, USEC_PER_SEC);
6391 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6392 } else {
6393 /* counter or tsc mode for trace_clock */
6394 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006395 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006396
6397 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006398 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006399 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006400
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006401 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006402 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6403
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006404 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006405 trace_seq_printf(s, "read events: %ld\n", cnt);
6406
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006407 count = simple_read_from_buffer(ubuf, count, ppos,
6408 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006409
6410 kfree(s);
6411
6412 return count;
6413}
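/*
 * The resulting per_cpu/cpuN/stats file reads back as a simple
 * key/value listing built by the trace_seq_printf() calls above; the
 * values below are illustrative only:
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 65536
 *	oldest event ts:  5234.000123
 *	now ts:  5250.000456
 *	dropped events: 0
 *	read events: 512
 */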
6414
6415static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006416 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006417 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006418 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006419 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006420};
6421
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006422#ifdef CONFIG_DYNAMIC_FTRACE
6423
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006424int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006425{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006426 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006427}
6428
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006429static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006430tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006431 size_t cnt, loff_t *ppos)
6432{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006433 static char ftrace_dyn_info_buffer[1024];
6434 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006435 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006436 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006437 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006438 int r;
6439
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006440 mutex_lock(&dyn_info_mutex);
6441 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006442
Steven Rostedta26a2a22008-10-31 00:03:22 -04006443 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006444 buf[r++] = '\n';
6445
6446 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6447
6448 mutex_unlock(&dyn_info_mutex);
6449
6450 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006451}
6452
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006453static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006454 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006455 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006456 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006457};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006458#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006459
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006460#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6461static void
6462ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006463{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006464 tracing_snapshot();
6465}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006466
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006467static void
6468ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6469{
6470 unsigned long *count = (long *)data;
6471
6472 if (!*count)
6473 return;
6474
6475 if (*count != -1)
6476 (*count)--;
6477
6478 tracing_snapshot();
6479}
6480
6481static int
6482ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6483 struct ftrace_probe_ops *ops, void *data)
6484{
6485 long count = (long)data;
6486
6487 seq_printf(m, "%ps:", (void *)ip);
6488
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006489 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006490
6491 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006492 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006493 else
6494 seq_printf(m, ":count=%ld\n", count);
6495
6496 return 0;
6497}
6498
6499static struct ftrace_probe_ops snapshot_probe_ops = {
6500 .func = ftrace_snapshot,
6501 .print = ftrace_snapshot_print,
6502};
6503
6504static struct ftrace_probe_ops snapshot_count_probe_ops = {
6505 .func = ftrace_count_snapshot,
6506 .print = ftrace_snapshot_print,
6507};
6508
6509static int
6510ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6511 char *glob, char *cmd, char *param, int enable)
6512{
6513 struct ftrace_probe_ops *ops;
6514 void *count = (void *)-1;
6515 char *number;
6516 int ret;
6517
6518 /* hash funcs only work with set_ftrace_filter */
6519 if (!enable)
6520 return -EINVAL;
6521
6522 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6523
6524 if (glob[0] == '!') {
6525 unregister_ftrace_function_probe_func(glob+1, ops);
6526 return 0;
6527 }
6528
6529 if (!param)
6530 goto out_reg;
6531
6532 number = strsep(&param, ":");
6533
6534 if (!strlen(number))
6535 goto out_reg;
6536
6537 /*
6538 * We use the callback data field (which is a pointer)
6539 * as our counter.
6540 */
6541 ret = kstrtoul(number, 0, (unsigned long *)&count);
6542 if (ret)
6543 return ret;
6544
6545 out_reg:
6546 ret = register_ftrace_function_probe(glob, ops, count);
6547
6548 if (ret >= 0)
6549 alloc_snapshot(&global_trace);
6550
6551 return ret < 0 ? ret : 0;
6552}
6553
6554static struct ftrace_func_command ftrace_snapshot_cmd = {
6555 .name = "snapshot",
6556 .func = ftrace_trace_snapshot_callback,
6557};
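/*
 * Once registered, the command is driven through set_ftrace_filter.
 * For example (illustrative), writing "schedule:snapshot" arms a
 * snapshot on every call to schedule(), "schedule:snapshot:5" limits it
 * to the first five hits, and "!schedule:snapshot" removes the probe
 * again, mirroring the '!' handling in ftrace_trace_snapshot_callback()
 * above. Registering the probe also allocates the snapshot buffer via
 * alloc_snapshot().
 */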
6558
Tom Zanussi38de93a2013-10-24 08:34:18 -05006559static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006560{
6561 return register_ftrace_command(&ftrace_snapshot_cmd);
6562}
6563#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006564static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006565#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006566
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006567static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006568{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006569 if (WARN_ON(!tr->dir))
6570 return ERR_PTR(-ENODEV);
6571
6572 /* Top directory uses NULL as the parent */
6573 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6574 return NULL;
6575
6576 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006577 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006578}
6579
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006580static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6581{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006582 struct dentry *d_tracer;
6583
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006584 if (tr->percpu_dir)
6585 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006586
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006587 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006588 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006589 return NULL;
6590
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006591 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006592
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006593 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006594 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006595
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006596 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006597}
6598
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006599static struct dentry *
6600trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6601 void *data, long cpu, const struct file_operations *fops)
6602{
6603 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6604
6605 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006606 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006607 return ret;
6608}
6609
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006610static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006611tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006612{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006613 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006614 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006615 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006616
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006617 if (!d_percpu)
6618 return;
6619
Steven Rostedtdd49a382010-10-20 21:51:26 -04006620 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006621 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006622 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006623 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006624 return;
6625 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006626
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006627 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006628 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006629 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006630
6631 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006632 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006633 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006634
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006635 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006636 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006637
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006638 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006639 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006640
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006641 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006642 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006643
6644#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006645 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006646 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006647
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006648 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006649 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006650#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006651}
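/*
 * Each tracing CPU thus ends up with its own directory (the snapshot
 * files only with CONFIG_TRACER_SNAPSHOT):
 *
 *	per_cpu/cpuN/trace_pipe
 *	per_cpu/cpuN/trace
 *	per_cpu/cpuN/trace_pipe_raw
 *	per_cpu/cpuN/stats
 *	per_cpu/cpuN/buffer_size_kb
 *	per_cpu/cpuN/snapshot
 *	per_cpu/cpuN/snapshot_raw
 */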
6652
Steven Rostedt60a11772008-05-12 21:20:44 +02006653#ifdef CONFIG_FTRACE_SELFTEST
6654/* Let selftest have access to static functions in this file */
6655#include "trace_selftest.c"
6656#endif
6657
Steven Rostedt577b7852009-02-26 23:43:05 -05006658static ssize_t
6659trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6660 loff_t *ppos)
6661{
6662 struct trace_option_dentry *topt = filp->private_data;
6663 char *buf;
6664
6665 if (topt->flags->val & topt->opt->bit)
6666 buf = "1\n";
6667 else
6668 buf = "0\n";
6669
6670 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6671}
6672
6673static ssize_t
6674trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6675 loff_t *ppos)
6676{
6677 struct trace_option_dentry *topt = filp->private_data;
6678 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05006679 int ret;
6680
Peter Huewe22fe9b52011-06-07 21:58:27 +02006681 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6682 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05006683 return ret;
6684
Li Zefan8d18eaa2009-12-08 11:17:06 +08006685 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05006686 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08006687
6688 if (!!(topt->flags->val & topt->opt->bit) != val) {
6689 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05006690 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05006691 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08006692 mutex_unlock(&trace_types_lock);
6693 if (ret)
6694 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05006695 }
6696
6697 *ppos += cnt;
6698
6699 return cnt;
6700}
6701
6702
6703static const struct file_operations trace_options_fops = {
6704 .open = tracing_open_generic,
6705 .read = trace_options_read,
6706 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006707 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05006708};
6709
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006710/*
6711 * In order to pass in both the trace_array descriptor as well as the index
6712 * to the flag that the trace option file represents, the trace_array
6713 * has a character array of trace_flags_index[], which holds the index
6714 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6715 * The address of this character array is passed to the flag option file
6716 * read/write callbacks.
6717 *
6718 * In order to extract both the index and the trace_array descriptor,
6719 * get_tr_index() uses the following algorithm.
6720 *
6721 * idx = *ptr;
6722 *
 6724 * Dereferencing the pointer yields the index itself, because the
 6725 * array is built so that index[i] == i (remember, index[1] == 1).
 6726 *
 6727 * Then, to get the trace_array descriptor, subtracting that index
 6728 * from the pointer lands us at the start of the index array.
6728 *
6729 * ptr - idx == &index[0]
6730 *
6731 * Then a simple container_of() from that pointer gets us to the
6732 * trace_array descriptor.
6733 */
6734static void get_tr_index(void *data, struct trace_array **ptr,
6735 unsigned int *pindex)
6736{
6737 *pindex = *(unsigned char *)data;
6738
6739 *ptr = container_of(data - *pindex, struct trace_array,
6740 trace_flags_index);
6741}
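/*
 * Worked example (illustrative): if data points at
 * tr->trace_flags_index[3], then *pindex becomes 3 and
 * data - 3 == &tr->trace_flags_index[0], so the container_of() above
 * recovers tr itself.
 */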
6742
Steven Rostedta8259072009-02-26 22:19:12 -05006743static ssize_t
6744trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6745 loff_t *ppos)
6746{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006747 void *tr_index = filp->private_data;
6748 struct trace_array *tr;
6749 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006750 char *buf;
6751
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006752 get_tr_index(tr_index, &tr, &index);
6753
6754 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006755 buf = "1\n";
6756 else
6757 buf = "0\n";
6758
6759 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6760}
6761
6762static ssize_t
6763trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6764 loff_t *ppos)
6765{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006766 void *tr_index = filp->private_data;
6767 struct trace_array *tr;
6768 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006769 unsigned long val;
6770 int ret;
6771
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006772 get_tr_index(tr_index, &tr, &index);
6773
Peter Huewe22fe9b52011-06-07 21:58:27 +02006774 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6775 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006776 return ret;
6777
Zhaoleif2d84b62009-08-07 18:55:48 +08006778 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006779 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006780
6781 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006782 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006783 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006784
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006785 if (ret < 0)
6786 return ret;
6787
Steven Rostedta8259072009-02-26 22:19:12 -05006788 *ppos += cnt;
6789
6790 return cnt;
6791}
6792
Steven Rostedta8259072009-02-26 22:19:12 -05006793static const struct file_operations trace_options_core_fops = {
6794 .open = tracing_open_generic,
6795 .read = trace_options_core_read,
6796 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006797 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006798};
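/*
 * Both option file flavors accept only "0" or "1". As a sketch of the
 * resulting interface (flag name illustrative), writing "1" to
 * options/sym-offset flips the matching bit in tr->trace_flags through
 * set_tracer_flag(), while tracer-specific options go through
 * __set_tracer_option() instead.
 */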
6799
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006800struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006801 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006802 struct dentry *parent,
6803 void *data,
6804 const struct file_operations *fops)
6805{
6806 struct dentry *ret;
6807
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006808 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006809 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07006810 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006811
6812 return ret;
6813}
6814
6815
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006816static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006817{
6818 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006819
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006820 if (tr->options)
6821 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006822
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006823 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006824 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05006825 return NULL;
6826
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006827 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006828 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006829 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05006830 return NULL;
6831 }
6832
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006833 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006834}
6835
Steven Rostedt577b7852009-02-26 23:43:05 -05006836static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006837create_trace_option_file(struct trace_array *tr,
6838 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006839 struct tracer_flags *flags,
6840 struct tracer_opt *opt)
6841{
6842 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006843
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006844 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006845 if (!t_options)
6846 return;
6847
6848 topt->flags = flags;
6849 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006850 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006851
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006852 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006853 &trace_options_fops);
6854
Steven Rostedt577b7852009-02-26 23:43:05 -05006855}
6856
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006857static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006858create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006859{
6860 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006861 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05006862 struct tracer_flags *flags;
6863 struct tracer_opt *opts;
6864 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006865 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05006866
6867 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006868 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05006869
6870 flags = tracer->flags;
6871
6872 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006873 return;
6874
6875 /*
6876 * If this is an instance, only create flags for tracers
6877 * the instance may have.
6878 */
6879 if (!trace_ok_for_array(tracer, tr))
6880 return;
6881
6882 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08006883		/* Make sure there are no duplicate flags. */
6884 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006885 return;
6886 }
Steven Rostedt577b7852009-02-26 23:43:05 -05006887
6888 opts = flags->opts;
6889
6890 for (cnt = 0; opts[cnt].name; cnt++)
6891 ;
6892
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006893 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006894 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006895 return;
6896
6897 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6898 GFP_KERNEL);
6899 if (!tr_topts) {
6900 kfree(topts);
6901 return;
6902 }
6903
6904 tr->topts = tr_topts;
6905 tr->topts[tr->nr_topts].tracer = tracer;
6906 tr->topts[tr->nr_topts].topts = topts;
6907 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05006908
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006909 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006910 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006911 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006912 WARN_ONCE(topts[cnt].entry == NULL,
6913 "Failed to create trace option: %s",
6914 opts[cnt].name);
6915 }
Steven Rostedt577b7852009-02-26 23:43:05 -05006916}
6917
Steven Rostedta8259072009-02-26 22:19:12 -05006918static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006919create_trace_option_core_file(struct trace_array *tr,
6920 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006921{
6922 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006923
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006924 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006925 if (!t_options)
6926 return NULL;
6927
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006928 return trace_create_file(option, 0644, t_options,
6929 (void *)&tr->trace_flags_index[index],
6930 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006931}
6932
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006933static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006934{
6935 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006936 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006937 int i;
6938
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006939 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006940 if (!t_options)
6941 return;
6942
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006943 for (i = 0; trace_options[i]; i++) {
6944 if (top_level ||
6945 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6946 create_trace_option_core_file(tr, trace_options[i], i);
6947 }
Steven Rostedta8259072009-02-26 22:19:12 -05006948}
6949
Steven Rostedt499e5472012-02-22 15:50:28 -05006950static ssize_t
6951rb_simple_read(struct file *filp, char __user *ubuf,
6952 size_t cnt, loff_t *ppos)
6953{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006954 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006955 char buf[64];
6956 int r;
6957
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006958 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006959 r = sprintf(buf, "%d\n", r);
6960
6961 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6962}
6963
6964static ssize_t
6965rb_simple_write(struct file *filp, const char __user *ubuf,
6966 size_t cnt, loff_t *ppos)
6967{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006968 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006969 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006970 unsigned long val;
6971 int ret;
6972
6973 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6974 if (ret)
6975 return ret;
6976
6977 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006978 mutex_lock(&trace_types_lock);
6979 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006980 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006981 if (tr->current_trace->start)
6982 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006983 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006984 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006985 if (tr->current_trace->stop)
6986 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006987 }
6988 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006989 }
6990
6991 (*ppos)++;
6992
6993 return cnt;
6994}
6995
6996static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006997 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006998 .read = rb_simple_read,
6999 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007000 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007001 .llseek = default_llseek,
7002};
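/*
 * From user space this is the familiar tracing_on switch (paths assume
 * the usual tracefs mount point): writing '0' to
 * /sys/kernel/tracing/tracing_on stops recording and calls the current
 * tracer's ->stop() hook, writing '1' restarts it, and reading the file
 * returns the current state.
 */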
7003
Steven Rostedt277ba042012-08-03 16:10:49 -04007004struct dentry *trace_instance_dir;
7005
7006static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007007init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007008
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007009static int
7010allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007011{
7012 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007013
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007014 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007015
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007016 buf->tr = tr;
7017
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007018 buf->buffer = ring_buffer_alloc(size, rb_flags);
7019 if (!buf->buffer)
7020 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007021
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007022 buf->data = alloc_percpu(struct trace_array_cpu);
7023 if (!buf->data) {
7024 ring_buffer_free(buf->buffer);
7025 return -ENOMEM;
7026 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007027
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007028 /* Allocate the first page for all buffers */
7029 set_buffer_entries(&tr->trace_buffer,
7030 ring_buffer_size(tr->trace_buffer.buffer, 0));
7031
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007032 return 0;
7033}
7034
7035static int allocate_trace_buffers(struct trace_array *tr, int size)
7036{
7037 int ret;
7038
7039 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7040 if (ret)
7041 return ret;
7042
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007043#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007044 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7045 allocate_snapshot ? size : 1);
7046 if (WARN_ON(ret)) {
7047 ring_buffer_free(tr->trace_buffer.buffer);
7048 free_percpu(tr->trace_buffer.data);
7049 return -ENOMEM;
7050 }
7051 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007052
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007053 /*
7054 * Only the top level trace array gets its snapshot allocated
7055 * from the kernel command line.
7056 */
7057 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007058#endif
7059 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007060}
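/*
 * Note that with CONFIG_TRACER_MAX_TRACE the max_buffer (the swap
 * target for latency tracers and snapshots) is normally allocated at a
 * single page here and only grown to full size when a snapshot is
 * actually requested, unless boot-time snapshot allocation was asked
 * for on the command line.
 */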
7061
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007062static void free_trace_buffer(struct trace_buffer *buf)
7063{
7064 if (buf->buffer) {
7065 ring_buffer_free(buf->buffer);
7066 buf->buffer = NULL;
7067 free_percpu(buf->data);
7068 buf->data = NULL;
7069 }
7070}
7071
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007072static void free_trace_buffers(struct trace_array *tr)
7073{
7074 if (!tr)
7075 return;
7076
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007077 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007078
7079#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007080 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007081#endif
7082}
7083
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007084static void init_trace_flags_index(struct trace_array *tr)
7085{
7086 int i;
7087
7088 /* Used by the trace options files */
7089 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7090 tr->trace_flags_index[i] = i;
7091}
7092
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007093static void __update_tracer_options(struct trace_array *tr)
7094{
7095 struct tracer *t;
7096
7097 for (t = trace_types; t; t = t->next)
7098 add_tracer_options(tr, t);
7099}
7100
7101static void update_tracer_options(struct trace_array *tr)
7102{
7103 mutex_lock(&trace_types_lock);
7104 __update_tracer_options(tr);
7105 mutex_unlock(&trace_types_lock);
7106}
7107
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007108static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007109{
Steven Rostedt277ba042012-08-03 16:10:49 -04007110 struct trace_array *tr;
7111 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04007112
7113 mutex_lock(&trace_types_lock);
7114
7115 ret = -EEXIST;
7116 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7117 if (tr->name && strcmp(tr->name, name) == 0)
7118 goto out_unlock;
7119 }
7120
7121 ret = -ENOMEM;
7122 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7123 if (!tr)
7124 goto out_unlock;
7125
7126 tr->name = kstrdup(name, GFP_KERNEL);
7127 if (!tr->name)
7128 goto out_free_tr;
7129
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007130 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7131 goto out_free_tr;
7132
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007133 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007134
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007135 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7136
Steven Rostedt277ba042012-08-03 16:10:49 -04007137 raw_spin_lock_init(&tr->start_lock);
7138
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007139 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7140
Steven Rostedt277ba042012-08-03 16:10:49 -04007141 tr->current_trace = &nop_trace;
7142
7143 INIT_LIST_HEAD(&tr->systems);
7144 INIT_LIST_HEAD(&tr->events);
7145
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007146 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04007147 goto out_free_tr;
7148
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007149 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007150 if (!tr->dir)
7151 goto out_free_tr;
7152
7153 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007154 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007155 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007156 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007157 }
Steven Rostedt277ba042012-08-03 16:10:49 -04007158
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007159 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007160 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007161 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04007162
7163 list_add(&tr->list, &ftrace_trace_arrays);
7164
7165 mutex_unlock(&trace_types_lock);
7166
7167 return 0;
7168
7169 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007170 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007171 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04007172 kfree(tr->name);
7173 kfree(tr);
7174
7175 out_unlock:
7176 mutex_unlock(&trace_types_lock);
7177
7178 return ret;
7179
7180}
7181
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007182static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007183{
7184 struct trace_array *tr;
7185 int found = 0;
7186 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007187 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007188
7189 mutex_lock(&trace_types_lock);
7190
7191 ret = -ENODEV;
7192 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7193 if (tr->name && strcmp(tr->name, name) == 0) {
7194 found = 1;
7195 break;
7196 }
7197 }
7198 if (!found)
7199 goto out_unlock;
7200
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007201 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007202 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007203 goto out_unlock;
7204
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007205 list_del(&tr->list);
7206
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007207 /* Disable all the flags that were enabled coming in */
7208 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7209 if ((1 << i) & ZEROED_TRACE_FLAGS)
7210 set_tracer_flag(tr, 1 << i, 0);
7211 }
7212
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05007213 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007214 event_trace_del_tracer(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007215 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08007216 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04007217 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007218
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007219 for (i = 0; i < tr->nr_topts; i++) {
7220 kfree(tr->topts[i].topts);
7221 }
7222 kfree(tr->topts);
7223
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007224 kfree(tr->name);
7225 kfree(tr);
7226
7227 ret = 0;
7228
7229 out_unlock:
7230 mutex_unlock(&trace_types_lock);
7231
7232 return ret;
7233}
7234
Steven Rostedt277ba042012-08-03 16:10:49 -04007235static __init void create_trace_instances(struct dentry *d_tracer)
7236{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007237 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7238 instance_mkdir,
7239 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007240 if (WARN_ON(!trace_instance_dir))
7241 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007242}
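/*
 * A minimal user-space sketch (paths are assumptions based on the usual
 * tracefs mount point): instances are created and destroyed with plain
 * mkdir(2)/rmdir(2) in the directory registered above.
 *
 *	mkdir("/sys/kernel/tracing/instances/foo", 0755);
 *	... use the instance's trace, trace_pipe, etc. ...
 *	rmdir("/sys/kernel/tracing/instances/foo");
 *
 * rmdir() fails with EBUSY while the instance still holds references,
 * matching the check in instance_rmdir().
 */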
7243
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007244static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007245init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007246{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007247 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007248
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05007249 trace_create_file("available_tracers", 0444, d_tracer,
7250 tr, &show_traces_fops);
7251
7252 trace_create_file("current_tracer", 0644, d_tracer,
7253 tr, &set_tracer_fops);
7254
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007255 trace_create_file("tracing_cpumask", 0644, d_tracer,
7256 tr, &tracing_cpumask_fops);
7257
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007258 trace_create_file("trace_options", 0644, d_tracer,
7259 tr, &tracing_iter_fops);
7260
7261 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007262 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007263
7264 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02007265 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007266
7267 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007268 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007269
7270 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7271 tr, &tracing_total_entries_fops);
7272
Wang YanQing238ae932013-05-26 16:52:01 +08007273 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007274 tr, &tracing_free_buffer_fops);
7275
7276 trace_create_file("trace_marker", 0220, d_tracer,
7277 tr, &tracing_mark_fops);
7278
Jamie Gennis13b625d2012-11-21 15:04:25 -08007279 trace_create_file("saved_tgids", 0444, d_tracer,
7280 tr, &tracing_saved_tgids_fops);
7281
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007282 trace_create_file("trace_clock", 0644, d_tracer, tr,
7283 &trace_clock_fops);
7284
7285 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007286 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007287
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007288 create_trace_options_dir(tr);
7289
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05007290#ifdef CONFIG_TRACER_MAX_TRACE
7291 trace_create_file("tracing_max_latency", 0644, d_tracer,
7292 &tr->max_latency, &tracing_max_lat_fops);
7293#endif
7294
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007295 if (ftrace_create_function_files(tr, d_tracer))
7296 WARN(1, "Could not allocate function filter files");
7297
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007298#ifdef CONFIG_TRACER_SNAPSHOT
7299 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007300 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007301#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007302
7303 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007304 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007305
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04007306 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007307}
7308
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007309static struct vfsmount *trace_automount(void *ignore)
7310{
7311 struct vfsmount *mnt;
7312 struct file_system_type *type;
7313
7314 /*
7315 * To maintain backward compatibility for tools that mount
7316 * debugfs to get to the tracing facility, tracefs is automatically
7317 * mounted to the debugfs/tracing directory.
7318 */
7319 type = get_fs_type("tracefs");
7320 if (!type)
7321 return NULL;
7322 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
7323 put_filesystem(type);
7324 if (IS_ERR(mnt))
7325 return NULL;
7326 mntget(mnt);
7327
7328 return mnt;
7329}
7330
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007331/**
7332 * tracing_init_dentry - initialize top level trace array
7333 *
7334 * This is called when creating files or directories in the tracing
 7336 * directory. It is called via fs_initcall() from the boot-up code
 7337 * and is expected to return the dentry of the top level tracing directory.
7337 */
7338struct dentry *tracing_init_dentry(void)
7339{
7340 struct trace_array *tr = &global_trace;
7341
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007342 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007343 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007344 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007345
Jiaxing Wang8b129192015-11-06 16:04:16 +08007346 if (WARN_ON(!tracefs_initialized()) ||
7347 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7348 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007349 return ERR_PTR(-ENODEV);
7350
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007351 /*
7352 * As there may still be users that expect the tracing
7353 * files to exist in debugfs/tracing, we must automount
7354 * the tracefs file system there, so older tools still
 7356	 * work with the newer kernel.
7356 */
7357 tr->dir = debugfs_create_automount("tracing", NULL,
7358 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007359 if (!tr->dir) {
7360 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7361 return ERR_PTR(-ENOMEM);
7362 }
7363
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007364 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007365}
7366
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007367extern struct trace_enum_map *__start_ftrace_enum_maps[];
7368extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7369
7370static void __init trace_enum_init(void)
7371{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007372 int len;
7373
7374 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007375 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007376}
7377
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007378#ifdef CONFIG_MODULES
7379static void trace_module_add_enums(struct module *mod)
7380{
7381 if (!mod->num_trace_enums)
7382 return;
7383
7384 /*
7385 * Modules with bad taint do not have events created, do
7386 * not bother with enums either.
7387 */
7388 if (trace_module_has_bad_taint(mod))
7389 return;
7390
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007391 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007392}
7393
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007394#ifdef CONFIG_TRACE_ENUM_MAP_FILE
7395static void trace_module_remove_enums(struct module *mod)
7396{
7397 union trace_enum_map_item *map;
7398 union trace_enum_map_item **last = &trace_enum_maps;
7399
7400 if (!mod->num_trace_enums)
7401 return;
7402
7403 mutex_lock(&trace_enum_mutex);
7404
7405 map = trace_enum_maps;
7406
7407 while (map) {
7408 if (map->head.mod == mod)
7409 break;
7410 map = trace_enum_jmp_to_tail(map);
7411 last = &map->tail.next;
7412 map = map->tail.next;
7413 }
7414 if (!map)
7415 goto out;
7416
7417 *last = trace_enum_jmp_to_tail(map)->tail.next;
7418 kfree(map);
7419 out:
7420 mutex_unlock(&trace_enum_mutex);
7421}
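
/*
 * The walk above is the classic "keep the address of the previous
 * next pointer" unlink, complicated here only by the head/tail union
 * layout. The bare idiom on a plain singly linked list, with
 * illustrative types:
 *
 *	struct node { struct node *next; };
 *
 *	static void unlink(struct node **head, struct node *victim)
 *	{
 *		struct node **last = head;
 *		struct node *n;
 *
 *		for (n = *head; n; last = &n->next, n = n->next) {
 *			if (n == victim) {
 *				*last = n->next;
 *				break;
 *			}
 *		}
 *	}
 */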
7422#else
7423static inline void trace_module_remove_enums(struct module *mod) { }
7424#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7425
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007426static int trace_module_notify(struct notifier_block *self,
7427 unsigned long val, void *data)
7428{
7429 struct module *mod = data;
7430
7431 switch (val) {
7432 case MODULE_STATE_COMING:
7433 trace_module_add_enums(mod);
7434 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007435 case MODULE_STATE_GOING:
7436 trace_module_remove_enums(mod);
7437 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007438 }
7439
7440 return 0;
7441}
7442
7443static struct notifier_block trace_module_nb = {
7444 .notifier_call = trace_module_notify,
7445 .priority = 0,
7446};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007447#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007448
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007449static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007450{
7451 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007452
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08007453 trace_access_lock_init();
7454
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007455 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007456 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09007457 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007458
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007459 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04007460 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007461
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007462 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007463 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007464
Li Zefan339ae5d2009-04-17 10:34:30 +08007465 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007466 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02007467
Avadh Patel69abe6a2009-04-10 16:04:48 -04007468 trace_create_file("saved_cmdlines", 0444, d_tracer,
7469 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007470
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007471 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7472 NULL, &tracing_saved_cmdlines_size_fops);
7473
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007474 trace_enum_init();
7475
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007476 trace_create_enum_file(d_tracer);
7477
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007478#ifdef CONFIG_MODULES
7479 register_module_notifier(&trace_module_nb);
7480#endif
7481
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007482#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007483 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7484 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007485#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007486
Steven Rostedt277ba042012-08-03 16:10:49 -04007487 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007488
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007489 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05007490
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01007491 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007492}
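
/*
 * For orientation (the paths are the conventional mount points, not
 * something this function creates): after this initcall the files
 * registered above appear at the top of tracefs, e.g.
 *
 *	/sys/kernel/tracing/tracing_thresh
 *	/sys/kernel/tracing/README
 *	/sys/kernel/tracing/saved_cmdlines
 *	/sys/kernel/tracing/saved_cmdlines_size
 *
 * and, through the automount set up earlier, under
 * /sys/kernel/debug/tracing/ as well.
 */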
7493
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007494static int trace_panic_handler(struct notifier_block *this,
7495 unsigned long event, void *unused)
7496{
Steven Rostedt944ac422008-10-23 19:26:08 -04007497 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007498 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007499 return NOTIFY_OK;
7500}
7501
7502static struct notifier_block trace_panic_notifier = {
7503 .notifier_call = trace_panic_handler,
7504 .next = NULL,
7505 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7506};
7507
7508static int trace_die_handler(struct notifier_block *self,
7509 unsigned long val,
7510 void *data)
7511{
7512 switch (val) {
7513 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007514 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007515 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007516 break;
7517 default:
7518 break;
7519 }
7520 return NOTIFY_OK;
7521}
7522
7523static struct notifier_block trace_die_notifier = {
7524 .notifier_call = trace_die_handler,
7525 .priority = 200
7526};
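
/*
 * Usage note, hedged: both notifiers act only when ftrace_dump_on_oops
 * is set, typically via the ftrace_dump_on_oops[=orig_cpu] boot
 * parameter or the matching /proc/sys/kernel/ftrace_dump_on_oops
 * sysctl.
 */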
7527
7528/*
7529 * printk is set to a max of 1024; we really don't need it that big.
7530 * Nothing should be printing 1000 characters anyway.
7531 */
7532#define TRACE_MAX_PRINT 1000
7533
7534/*
7535 * Define KERN_TRACE here so that we have one place to modify
7536 * it if we decide to change what log level the ftrace dump
7537 * should be at.
7538 */
Steven Rostedt428aee12009-01-14 12:24:42 -05007539#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007540
Jason Wessel955b61e2010-08-05 09:22:23 -05007541void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007542trace_printk_seq(struct trace_seq *s)
7543{
7544 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007545 if (s->seq.len >= TRACE_MAX_PRINT)
7546 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007547
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05007548 /*
7549 * More paranoid code. Although the buffer size is set to
7550 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7551 * an extra layer of protection.
7552 */
7553 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7554 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007555
7556	/* should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007557 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007558
7559 printk(KERN_TRACE "%s", s->buffer);
7560
Steven Rostedtf9520752009-03-02 14:04:40 -05007561 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007562}
7563
Jason Wessel955b61e2010-08-05 09:22:23 -05007564void trace_init_global_iter(struct trace_iterator *iter)
7565{
7566 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007567 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05007568 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007569 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07007570
7571 if (iter->trace && iter->trace->open)
7572 iter->trace->open(iter);
7573
7574 /* Annotate start of buffers if we had overruns */
7575 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7576 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7577
7578 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7579 if (trace_clocks[iter->tr->clock_id].in_ns)
7580 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05007581}
7582
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007583void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007584{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007585 /* use static because iter can be a bit big for the stack */
7586 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007587 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007588 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007589 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04007590 unsigned long flags;
7591 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007592
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007593 /* Only allow one dump user at a time. */
7594 if (atomic_inc_return(&dump_running) != 1) {
7595 atomic_dec(&dump_running);
7596 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04007597 }
7598
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007599 /*
7600 * Always turn off tracing when we dump.
7601 * We don't need to show trace output of what happens
7602 * between multiple crashes.
7603 *
7604 * If the user does a sysrq-z, then they can re-enable
7605 * tracing with echo 1 > tracing_on.
7606 */
7607 tracing_off();
7608
7609 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007610
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08007611 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05007612 trace_init_global_iter(&iter);
7613
Steven Rostedtd7690412008-10-01 00:29:53 -04007614 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05307615 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04007616 }
7617
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007618 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007619
Török Edwinb54d3de2008-11-22 13:28:48 +02007620 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007621 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02007622
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007623 switch (oops_dump_mode) {
7624 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05007625 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007626 break;
7627 case DUMP_ORIG:
7628 iter.cpu_file = raw_smp_processor_id();
7629 break;
7630 case DUMP_NONE:
7631 goto out_enable;
7632 default:
7633 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05007634 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007635 }
7636
7637 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007638
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007639 /* Did function tracer already get disabled? */
7640 if (ftrace_is_dead()) {
7641 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7642 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7643 }
7644
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007645 /*
7646 * We need to stop all tracing on all CPUs to read
7647 * the next buffer. This is a bit expensive, but is
7648 * not done often. We read all that we can,
7649 * and then release the locks again.
7650 */
7651
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007652 while (!trace_empty(&iter)) {
7653
7654 if (!cnt)
7655 printk(KERN_TRACE "---------------------------------\n");
7656
7657 cnt++;
7658
7659 /* reset all but tr, trace, and overruns */
7660 memset(&iter.seq, 0,
7661 sizeof(struct trace_iterator) -
7662 offsetof(struct trace_iterator, seq));
7663 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7664 iter.pos = -1;
7665
Jason Wessel955b61e2010-08-05 09:22:23 -05007666 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08007667 int ret;
7668
7669 ret = print_trace_line(&iter);
7670 if (ret != TRACE_TYPE_NO_CONSUME)
7671 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007672 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05007673 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007674
7675 trace_printk_seq(&iter.seq);
7676 }
7677
7678 if (!cnt)
7679 printk(KERN_TRACE " (ftrace buffer empty)\n");
7680 else
7681 printk(KERN_TRACE "---------------------------------\n");
7682
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007683 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007684 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007685
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007686 for_each_tracing_cpu(cpu) {
7687 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007688 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007689 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04007690 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007691}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07007692EXPORT_SYMBOL_GPL(ftrace_dump);
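
/*
 * A minimal sketch of using the export from other kernel code while
 * chasing a hard-to-hit bug ("saw_corruption" is an illustrative
 * condition, not a real flag):
 *
 *	if (unlikely(saw_corruption))
 *		ftrace_dump(DUMP_ALL);		(dump every CPU's buffer)
 *
 * DUMP_ORIG instead dumps only the CPU that called ftrace_dump().
 */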
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007693
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007694__init static int tracer_alloc_buffers(void)
7695{
Steven Rostedt73c51622009-03-11 13:42:01 -04007696 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307697 int ret = -ENOMEM;
7698
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007699 /*
7700	 * Make sure we don't accidentally add more trace options
7701 * than we have bits for.
7702 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007703 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
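	/*
	 * (Compile-time assert: if more TRACE_ITER_* bits are ever
	 * defined than TRACE_FLAGS_MAX_SIZE allows, the build fails
	 * right here instead of the flags word silently overflowing
	 * at run time.)
	 */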
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007704
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307705 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7706 goto out;
7707
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007708 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307709 goto out_free_buffer_mask;
7710
Steven Rostedt07d777f2011-09-22 14:01:55 -04007711 /* Only allocate trace_printk buffers if a trace_printk exists */
7712 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04007713 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04007714 trace_printk_init_buffers();
7715
Steven Rostedt73c51622009-03-11 13:42:01 -04007716 /* To save memory, keep the ring buffer size to its minimum */
7717 if (ring_buffer_expanded)
7718 ring_buf_size = trace_buf_size;
7719 else
7720 ring_buf_size = 1;
7721
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307722 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007723 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007724
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007725 raw_spin_lock_init(&global_trace.start_lock);
7726
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007727 /* Used for event triggers */
7728 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7729 if (!temp_buffer)
7730 goto out_free_cpumask;
7731
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007732 if (trace_create_savedcmd() < 0)
7733 goto out_free_temp_buffer;
7734
Steven Rostedtab464282008-05-12 21:21:00 +02007735	/* TODO: make the number of buffers hot pluggable with CPUs */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007736 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007737 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7738 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007739 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007740 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04007741
Steven Rostedt499e5472012-02-22 15:50:28 -05007742 if (global_trace.buffer_disabled)
7743 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007744
Steven Rostedte1e232c2014-02-10 23:38:46 -05007745 if (trace_boot_clock) {
7746 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7747 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007748 pr_warn("Trace clock %s not defined, going back to default\n",
7749 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05007750 }
7751
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007752 /*
7753 * register_tracer() might reference current_trace, so it
7754 * needs to be set before we register anything. This is
7755 * just a bootstrap of current_trace anyway.
7756 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007757 global_trace.current_trace = &nop_trace;
7758
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007759 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7760
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05007761 ftrace_init_global_array_ops(&global_trace);
7762
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007763 init_trace_flags_index(&global_trace);
7764
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007765 register_tracer(&nop_trace);
7766
Steven Rostedt60a11772008-05-12 21:20:44 +02007767 /* All seems OK, enable tracing */
7768 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007769
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007770 atomic_notifier_chain_register(&panic_notifier_list,
7771 &trace_panic_notifier);
7772
7773 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007774
Steven Rostedtae63b312012-05-03 23:09:03 -04007775 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7776
7777 INIT_LIST_HEAD(&global_trace.systems);
7778 INIT_LIST_HEAD(&global_trace.events);
7779 list_add(&global_trace.list, &ftrace_trace_arrays);
7780
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08007781 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04007782
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007783 register_snapshot_cmd();
7784
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007785 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007786
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007787out_free_savedcmd:
7788 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007789out_free_temp_buffer:
7790 ring_buffer_free(temp_buffer);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307791out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007792 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307793out_free_buffer_mask:
7794 free_cpumask_var(tracing_buffer_mask);
7795out:
7796 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007797}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007798
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007799void __init trace_init(void)
7800{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05007801 if (tracepoint_printk) {
7802 tracepoint_print_iter =
7803 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7804 if (WARN_ON(!tracepoint_print_iter))
7805 tracepoint_printk = 0;
7806 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007807 tracer_alloc_buffers();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007808 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007809}
7810
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007811__init static int clear_boot_tracer(void)
7812{
7813 /*
7814	 * The default bootup tracer name is held in an init section.
7815	 * This function is called at late_initcall time. If we did not
7816 * find the boot tracer, then clear it out, to prevent
7817 * later registration from accessing the buffer that is
7818 * about to be freed.
7819 */
7820 if (!default_bootup_tracer)
7821 return 0;
7822
7823 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7824 default_bootup_tracer);
7825 default_bootup_tracer = NULL;
7826
7827 return 0;
7828}
7829
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007830fs_initcall(tracer_init_tracefs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007831late_initcall(clear_boot_tracer);
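
/*
 * Ordering, for reference: trace_init() above is called directly from
 * start_kernel(), long before initcalls run; tracer_init_tracefs()
 * runs at fs_initcall() time, once tracefs can be populated; and
 * clear_boot_tracer() runs last, at late_initcall() time, after the
 * boot tracer has had its chance to register.
 */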