/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * than "mod" or "enum_string"
         */
        union trace_enum_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
        struct trace_enum_map           map;
        struct trace_enum_map_head      head;
        struct trace_enum_map_tail      tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS   (TRACE_ITER_PRINTK |                    \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        return !trace_find_filtered_pid(filtered_pids, task->pid);
}
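
/*
 * Illustrative sketch (not code from this file): a tracing hook would
 * typically consult trace_ignore_this_task() before recording anything
 * for a task. The probe and pid_list names below are hypothetical:
 *
 *      static void my_event_probe(struct task_struct *task)
 *      {
 *              if (trace_ignore_this_task(my_filtered_pids, task))
 *                      return;
 *              record_my_event(task);
 *      }
 */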

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
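
/*
 * Illustrative sketch (an assumption, not code from this file): the three
 * helpers above are meant to slot straight into a seq_file's operations.
 * The "f_" names and "my_pid_list" are hypothetical; the set_event_pid
 * file is wired up along these lines:
 *
 *      static void *f_pid_start(struct seq_file *m, loff_t *pos)
 *      {
 *              return trace_pid_start(my_pid_list, pos);
 *      }
 *
 *      static void *f_pid_next(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return trace_pid_next(my_pid_list, v, pos);
 *      }
 *
 *      static const struct seq_operations f_pid_sops = {
 *              .start = f_pid_start,
 *              .next  = f_pid_next,
 *              .stop  = f_pid_stop,
 *              .show  = trace_pid_show,
 *      };
 */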

/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
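
/*
 * Illustrative usage (a sketch under assumptions, not code from this file):
 * from user space the write is a whitespace-separated list of pids, e.g.
 *
 *      # echo 123 456 > set_event_pid
 *
 * and a kernel-side caller would publish the result roughly as:
 *
 *      ret = trace_pid_write(old_list, &new_list, ubuf, cnt);
 *      if (ret < 0)
 *              return ret;
 *      rcu_assign_pointer(tr->filtered_pids, new_list);
 *      synchronize_sched();
 *      if (old_list)
 *              trace_free_pid_list(old_list);
 */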

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, not having to wait for all that
 * output is much appreciated. Anyway, this is both boot time
 * and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
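
/*
 * Illustrative pattern (a sketch, not a new API): readers bracket their
 * ring buffer accesses with these primitives, passing either a cpu id or
 * RING_BUFFER_ALL_CPUS:
 *
 *      trace_access_lock(cpu);
 *      event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *      trace_access_unlock(cpu);
 *
 * A per-cpu reader only contends on its own mutex, while a reader of the
 * whole buffer takes all_cpu_access_lock for write and excludes everyone.
 */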

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
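
/*
 * Illustrative usage (a sketch with a hypothetical condition): once the
 * snapshot buffer is allocated, a debugging site can freeze the
 * interesting data while tracing continues:
 *
 *      if (suspicious_condition())
 *              tracing_snapshot();
 *
 * The frozen trace can then be read from the "snapshot" file in tracefs.
 */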

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to check
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
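
/*
 * Illustrative usage (a sketch, not code from this file): these on/off
 * helpers let kernel code fence a window of interest so its trace data
 * is not overwritten by later events:
 *
 *      tracing_on();
 *      do_something_interesting();
 *      tracing_off();
 *
 * tracing_is_on() can then be checked before relying on the buffer state.
 */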

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
};
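
/*
 * Illustrative usage (an assumption about the user-space side): the clock
 * names in the table above are what the trace_clock file exposes, with
 * the current selection shown in brackets:
 *
 *      # cat trace_clock
 *      [local] global counter uptime perf mono mono_raw boot
 *      # echo mono > trace_clock
 */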

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
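
/*
 * Illustrative calling convention (a sketch; trace_pid_write() earlier in
 * this file is a real user): callers drain the user buffer in a loop,
 * consuming one parsed token per iteration:
 *
 *      while (cnt > 0) {
 *              pos = 0;
 *              ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *              if (ret < 0 || !trace_parser_loaded(&parser))
 *                      break;
 *              read += ret;
 *              ubuf += ret;
 *              cnt -= ret;
 *              (handle parser.buffer, then call trace_parser_clear())
 *      }
 */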

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        /* Inherit the recordable setting from trace_buffer */
        if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
                ring_buffer_record_on(tr->max_buffer.buffer);
        else
                ring_buffer_record_off(tr->max_buffer.buffer);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
1343
1344/**
1345 * update_max_tr_single - only copy one trace over, and reset the rest
1346 * @tr - tracer
1347 * @tsk - task with the latency
1348 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001349 *
1350 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001351 */
Ingo Molnare309b412008-05-12 21:20:51 +02001352void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001353update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1354{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001355 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001356
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001357 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001358 return;
1359
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001360 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001361 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001362 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001364 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001365 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001366
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001367 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001369 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001370
Steven Rostedte8165db2009-09-03 19:13:05 -04001371 if (ret == -EBUSY) {
1372 /*
1373 * We failed to swap the buffer due to a commit taking
1374 * place on this CPU. We fail to record, but we reset
1375 * the max trace buffer (no one writes directly to it)
1376 * and flag that it failed.
1377 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001378 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165db2009-09-03 19:13:05 -04001379 "Failed to swap buffers due to commit in progress\n");
1380 }
1381
Steven Rostedte8165db2009-09-03 19:13:05 -04001382 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001383
1384 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001385 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001386}
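
/*
 * Example (illustrative sketch, not taken from this file): a latency
 * tracer typically calls update_max_tr_single() from a path that
 * already has interrupts disabled, when it sees a new per-CPU record:
 *
 *	if (latency > tr->max_latency) {
 *		tr->max_latency = latency;
 *		update_max_tr_single(tr, current, smp_processor_id());
 *	}
 *
 * "latency" is a hypothetical local; the WARN_ON_ONCE(!irqs_disabled())
 * above is what enforces this calling convention.
 */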
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001387#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001388
Rabin Vincente30f53a2014-11-10 19:46:34 +01001389static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001390{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001391	/* Iterators are static; they should be filled or empty */
1392 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001393 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001394
Rabin Vincente30f53a2014-11-10 19:46:34 +01001395 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1396 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001397}
1398
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001399#ifdef CONFIG_FTRACE_STARTUP_TEST
1400static int run_tracer_selftest(struct tracer *type)
1401{
1402 struct trace_array *tr = &global_trace;
1403 struct tracer *saved_tracer = tr->current_trace;
1404 int ret;
1405
1406 if (!type->selftest || tracing_selftest_disabled)
1407 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001408
1409 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001410 * Run a selftest on this tracer.
1411 * Here we reset the trace buffer, and set the current
1412 * tracer to be this tracer. The tracer can then run some
1413 * internal tracing to verify that everything is in order.
1414 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001415 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001416 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001417
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001418 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001420#ifdef CONFIG_TRACER_MAX_TRACE
1421 if (type->use_max_tr) {
1422 /* If we expanded the buffers, make sure the max is expanded too */
1423 if (ring_buffer_expanded)
1424 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1425 RING_BUFFER_ALL_CPUS);
1426 tr->allocated_snapshot = true;
1427 }
1428#endif
1429
1430 /* the test is responsible for initializing and enabling */
1431 pr_info("Testing tracer %s: ", type->name);
1432 ret = type->selftest(type, tr);
1433 /* the test is responsible for resetting too */
1434 tr->current_trace = saved_tracer;
1435 if (ret) {
1436 printk(KERN_CONT "FAILED!\n");
1437 /* Add the warning after printing 'FAILED' */
1438 WARN_ON(1);
1439 return -1;
1440 }
1441 /* Only reset on passing, to avoid touching corrupted buffers */
1442 tracing_reset_online_cpus(&tr->trace_buffer);
1443
1444#ifdef CONFIG_TRACER_MAX_TRACE
1445 if (type->use_max_tr) {
1446 tr->allocated_snapshot = false;
1447
1448 /* Shrink the max buffer again */
1449 if (ring_buffer_expanded)
1450 ring_buffer_resize(tr->max_buffer.buffer, 1,
1451 RING_BUFFER_ALL_CPUS);
1452 }
1453#endif
1454
1455 printk(KERN_CONT "PASSED\n");
1456 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001457}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001458#else
1459static inline int run_tracer_selftest(struct tracer *type)
1460{
1461 return 0;
1462}
1463#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001464
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001465static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1466
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001467static void __init apply_trace_boot_options(void);
1468
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001469/**
1470 * register_tracer - register a tracer with the ftrace system.
1471 * @type: the plugin for the tracer
1472 *
1473 * Register a new plugin tracer.
1474 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001475int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001476{
1477 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001478 int ret = 0;
1479
1480 if (!type->name) {
1481 pr_info("Tracer must have a name\n");
1482 return -1;
1483 }
1484
Dan Carpenter24a461d2010-07-10 12:06:44 +02001485 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001486 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1487 return -1;
1488 }
1489
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001491
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001492 tracing_selftest_running = true;
1493
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494 for (t = trace_types; t; t = t->next) {
1495 if (strcmp(type->name, t->name) == 0) {
1496 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001497 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001498 type->name);
1499 ret = -1;
1500 goto out;
1501 }
1502 }
1503
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001504 if (!type->set_flag)
1505 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001506 if (!type->flags) {
1507		/* allocate a dummy tracer_flags */
1508 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001509 if (!type->flags) {
1510 ret = -ENOMEM;
1511 goto out;
1512 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001513 type->flags->val = 0;
1514 type->flags->opts = dummy_tracer_opt;
1515 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001516 if (!type->flags->opts)
1517 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001518
Chunyu Hud39cdd22016-03-08 21:37:01 +08001519 /* store the tracer for __set_tracer_option */
1520 type->flags->trace = type;
1521
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001522 ret = run_tracer_selftest(type);
1523 if (ret < 0)
1524 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001525
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526 type->next = trace_types;
1527 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001528 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001529
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001531 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532 mutex_unlock(&trace_types_lock);
1533
Steven Rostedtdac74942009-02-05 01:13:38 -05001534 if (ret || !default_bootup_tracer)
1535 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001536
Li Zefanee6c2c12009-09-18 14:06:47 +08001537 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001538 goto out_unlock;
1539
1540 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1541 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001542 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001543 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001544
1545 apply_trace_boot_options();
1546
Steven Rostedtdac74942009-02-05 01:13:38 -05001547	/* disable other selftests, since this will break them. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001548 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001549#ifdef CONFIG_FTRACE_STARTUP_TEST
1550 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1551 type->name);
1552#endif
1553
1554 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001555 return ret;
1556}
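
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * boot-time tracer registration. The tracer name and callbacks here
 * are hypothetical:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 *
 * Since register_tracer() is __init, registration must happen during
 * boot; it is not available to loadable modules.
 */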
1557
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001558void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001559{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001560 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001561
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001562 if (!buffer)
1563 return;
1564
Steven Rostedtf6339032009-09-04 12:35:16 -04001565 ring_buffer_record_disable(buffer);
1566
1567 /* Make sure all commits have finished */
1568 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001569 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001570
1571 ring_buffer_record_enable(buffer);
1572}
1573
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001574void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001575{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001576 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001577 int cpu;
1578
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001579 if (!buffer)
1580 return;
1581
Steven Rostedt621968c2009-09-04 12:02:35 -04001582 ring_buffer_record_disable(buffer);
1583
1584 /* Make sure all commits have finished */
1585 synchronize_sched();
1586
Alexander Z Lam94571582013-08-02 18:36:16 -07001587 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001588
1589 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001590 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001591
1592 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001593}
1594
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001595/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001596void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001597{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001598 struct trace_array *tr;
1599
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001600 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001601 tracing_reset_online_cpus(&tr->trace_buffer);
1602#ifdef CONFIG_TRACER_MAX_TRACE
1603 tracing_reset_online_cpus(&tr->max_buffer);
1604#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001605 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001606}
1607
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001608#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001609#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001610static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001611struct saved_cmdlines_buffer {
1612 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1613 unsigned *map_cmdline_to_pid;
Adrian Salidoa06ea262017-04-18 11:44:33 -07001614 unsigned *map_cmdline_to_tgid;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001615 unsigned cmdline_num;
1616 int cmdline_idx;
1617 char *saved_cmdlines;
1618};
1619static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001620
Steven Rostedt25b0b442008-05-12 21:21:00 +02001621/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001622static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001623
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001624static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001625{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001626 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1627}
1628
1629static inline void set_cmdline(int idx, const char *cmdline)
1630{
1631 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1632}
1633
1634static int allocate_cmdlines_buffer(unsigned int val,
1635 struct saved_cmdlines_buffer *s)
1636{
1637 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1638 GFP_KERNEL);
1639 if (!s->map_cmdline_to_pid)
1640 return -ENOMEM;
1641
1642 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1643 if (!s->saved_cmdlines) {
1644 kfree(s->map_cmdline_to_pid);
1645 return -ENOMEM;
1646 }
1647
Adrian Salidoa06ea262017-04-18 11:44:33 -07001648 s->map_cmdline_to_tgid = kmalloc_array(val,
1649 sizeof(*s->map_cmdline_to_tgid),
1650 GFP_KERNEL);
1651 if (!s->map_cmdline_to_tgid) {
1652 kfree(s->map_cmdline_to_pid);
1653 kfree(s->saved_cmdlines);
1654 return -ENOMEM;
1655 }
1656
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001657 s->cmdline_idx = 0;
1658 s->cmdline_num = val;
1659 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1660 sizeof(s->map_pid_to_cmdline));
1661 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1662 val * sizeof(*s->map_cmdline_to_pid));
Adrian Salidoa06ea262017-04-18 11:44:33 -07001663 memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
1664 val * sizeof(*s->map_cmdline_to_tgid));
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001665
1666 return 0;
1667}
1668
1669static int trace_create_savedcmd(void)
1670{
1671 int ret;
1672
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001673 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001674 if (!savedcmd)
1675 return -ENOMEM;
1676
1677 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1678 if (ret < 0) {
1679 kfree(savedcmd);
1680 savedcmd = NULL;
1681 return -ENOMEM;
1682 }
1683
1684 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001685}
1686
Carsten Emdeb5130b12009-09-13 01:43:07 +02001687int is_tracing_stopped(void)
1688{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001689 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001690}
1691
Steven Rostedt0f048702008-11-05 16:05:44 -05001692/**
1693 * tracing_start - quick start of the tracer
1694 *
1695 * If tracing is enabled but was stopped by tracing_stop,
1696 * this will start the tracer back up.
1697 */
1698void tracing_start(void)
1699{
1700 struct ring_buffer *buffer;
1701 unsigned long flags;
1702
1703 if (tracing_disabled)
1704 return;
1705
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001706 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1707 if (--global_trace.stop_count) {
1708 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001709 /* Someone screwed up their debugging */
1710 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001711 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001712 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001713 goto out;
1714 }
1715
Steven Rostedta2f80712010-03-12 19:56:00 -05001716 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001717 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001718
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001719 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001720 if (buffer)
1721 ring_buffer_record_enable(buffer);
1722
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001723#ifdef CONFIG_TRACER_MAX_TRACE
1724 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001725 if (buffer)
1726 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001727#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001728
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001729 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001730
Steven Rostedt0f048702008-11-05 16:05:44 -05001731 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001732 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1733}
1734
1735static void tracing_start_tr(struct trace_array *tr)
1736{
1737 struct ring_buffer *buffer;
1738 unsigned long flags;
1739
1740 if (tracing_disabled)
1741 return;
1742
1743 /* If global, we need to also start the max tracer */
1744 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1745 return tracing_start();
1746
1747 raw_spin_lock_irqsave(&tr->start_lock, flags);
1748
1749 if (--tr->stop_count) {
1750 if (tr->stop_count < 0) {
1751 /* Someone screwed up their debugging */
1752 WARN_ON_ONCE(1);
1753 tr->stop_count = 0;
1754 }
1755 goto out;
1756 }
1757
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001758 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001759 if (buffer)
1760 ring_buffer_record_enable(buffer);
1761
1762 out:
1763 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001764}
1765
1766/**
1767 * tracing_stop - quick stop of the tracer
1768 *
1769 * Lightweight way to stop tracing. Use in conjunction with
1770 * tracing_start.
1771 */
1772void tracing_stop(void)
1773{
1774 struct ring_buffer *buffer;
1775 unsigned long flags;
1776
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001777 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1778 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001779 goto out;
1780
Steven Rostedta2f80712010-03-12 19:56:00 -05001781 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001782 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001783
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001784 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001785 if (buffer)
1786 ring_buffer_record_disable(buffer);
1787
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001788#ifdef CONFIG_TRACER_MAX_TRACE
1789 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001790 if (buffer)
1791 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001792#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001793
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001794 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001795
Steven Rostedt0f048702008-11-05 16:05:44 -05001796 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001797 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1798}
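
/*
 * Example (illustrative sketch): pairing tracing_stop() with
 * tracing_start() so a just-captured trace is not overwritten while
 * it is being inspected:
 *
 *	tracing_stop();
 *	inspect_trace_data();	(hypothetical helper)
 *	tracing_start();
 *
 * The stop_count nests, so overlapping stop/start pairs from
 * different callers do not re-enable recording early.
 */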
1799
1800static void tracing_stop_tr(struct trace_array *tr)
1801{
1802 struct ring_buffer *buffer;
1803 unsigned long flags;
1804
1805 /* If global, we need to also stop the max tracer */
1806 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1807 return tracing_stop();
1808
1809 raw_spin_lock_irqsave(&tr->start_lock, flags);
1810 if (tr->stop_count++)
1811 goto out;
1812
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001813 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001814 if (buffer)
1815 ring_buffer_record_disable(buffer);
1816
1817 out:
1818 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001819}
1820
Ingo Molnare309b412008-05-12 21:20:51 +02001821void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001822
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001823static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001824{
Carsten Emdea635cf02009-03-18 09:00:41 +01001825 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001826
1827 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001828 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001829
Adrian Salidoa06ea262017-04-18 11:44:33 -07001830 preempt_disable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001831 /*
1832 * It's not the end of the world if we don't get
1833 * the lock, but we also don't want to spin
1834 * nor do we want to disable interrupts,
1835 * so if we miss here, then better luck next time.
1836 */
Adrian Salidoa06ea262017-04-18 11:44:33 -07001837 if (!arch_spin_trylock(&trace_cmdline_lock)) {
1838 preempt_enable();
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001839 return 0;
Adrian Salidoa06ea262017-04-18 11:44:33 -07001840 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001841
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001842 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001843 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001844 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001845
Carsten Emdea635cf02009-03-18 09:00:41 +01001846 /*
1847 * Check whether the cmdline buffer at idx has a pid
1848 * mapped. We are going to overwrite that entry so we
1849 * need to clear the map_pid_to_cmdline. Otherwise we
1850 * would read the new comm for the old pid.
1851 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001852 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001853 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001854 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001855
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001856 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1857 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001858
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001859 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001860 }
1861
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001862 set_cmdline(idx, tsk->comm);
Adrian Salidoa06ea262017-04-18 11:44:33 -07001863 savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001864 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salidoa06ea262017-04-18 11:44:33 -07001865 preempt_enable();
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001866
1867 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001868}
1869
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001870static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001871{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001872 unsigned map;
1873
Steven Rostedt4ca530852009-03-16 19:20:15 -04001874 if (!pid) {
1875 strcpy(comm, "<idle>");
1876 return;
1877 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001878
Steven Rostedt74bf4072010-01-25 15:11:53 -05001879 if (WARN_ON_ONCE(pid < 0)) {
1880 strcpy(comm, "<XXX>");
1881 return;
1882 }
1883
Steven Rostedt4ca530852009-03-16 19:20:15 -04001884 if (pid > PID_MAX_DEFAULT) {
1885 strcpy(comm, "<...>");
1886 return;
1887 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001888
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001889 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001890 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001891 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001892 else
1893 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001894}
1895
1896void trace_find_cmdline(int pid, char comm[])
1897{
1898 preempt_disable();
1899 arch_spin_lock(&trace_cmdline_lock);
1900
1901 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001902
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001903 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001904 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001905}
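
/*
 * Example usage (illustrative sketch): resolving a recorded pid back
 * to a command name, as an output formatter would. "entry" and "s"
 * are hypothetical locals:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, entry->pid);
 *
 * The destination buffer must hold TASK_COMM_LEN bytes, the slot size
 * used by the saved cmdlines array.
 */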
1906
Adrian Salidoa06ea262017-04-18 11:44:33 -07001907static int __find_tgid_locked(int pid)
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08001908{
1909 unsigned map;
1910 int tgid;
1911
Dmitry Shmidtb96956e2015-10-28 10:45:04 -07001912 map = savedcmd->map_pid_to_cmdline[pid];
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08001913 if (map != NO_CMDLINE_MAP)
Adrian Salidoa06ea262017-04-18 11:44:33 -07001914 tgid = savedcmd->map_cmdline_to_tgid[map];
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08001915 else
1916 tgid = -1;
1917
Adrian Salidoa06ea262017-04-18 11:44:33 -07001918 return tgid;
1919}
1920
1921int trace_find_tgid(int pid)
1922{
1923 int tgid;
1924
1925 preempt_disable();
1926 arch_spin_lock(&trace_cmdline_lock);
1927
1928 tgid = __find_tgid_locked(pid);
1929
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08001930 arch_spin_unlock(&trace_cmdline_lock);
1931 preempt_enable();
1932
1933 return tgid;
1934}
1935
Ingo Molnare309b412008-05-12 21:20:51 +02001936void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001937{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001938 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001939 return;
1940
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001941 if (!__this_cpu_read(trace_cmdline_save))
1942 return;
1943
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001944 if (trace_save_cmdline(tsk))
1945 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001946}
1947
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001948void
Steven Rostedt38697052008-10-01 13:14:09 -04001949tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1950 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951{
1952 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001953
Steven Rostedt777e2082008-09-29 23:02:42 -04001954 entry->preempt_count = pc & 0xff;
1955 entry->pid = (tsk) ? tsk->pid : 0;
1956 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001957#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001958 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001959#else
1960 TRACE_FLAG_IRQS_NOSUPPORT |
1961#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01001962 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001963 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondeti04e002a2016-12-09 21:50:17 +05301964 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001965 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1966 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001967}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001968EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
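
/*
 * Example (illustrative sketch): a caller fills a freshly reserved
 * entry by saving its own flags and preempt count, along the lines of
 * what trace_event_setup() below does:
 *
 *	local_save_flags(flags);
 *	tracing_generic_entry_update(ent, flags, preempt_count());
 *	ent->type = type;
 *
 * "ent", "flags" and "type" are the caller's locals.
 */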
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001969
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001970static __always_inline void
1971trace_event_setup(struct ring_buffer_event *event,
1972 int type, unsigned long flags, int pc)
1973{
1974 struct trace_entry *ent = ring_buffer_event_data(event);
1975
1976 tracing_generic_entry_update(ent, flags, pc);
1977 ent->type = type;
1978}
1979
Steven Rostedte77405a2009-09-02 14:17:06 -04001980struct ring_buffer_event *
1981trace_buffer_lock_reserve(struct ring_buffer *buffer,
1982 int type,
1983 unsigned long len,
1984 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001985{
1986 struct ring_buffer_event *event;
1987
Steven Rostedte77405a2009-09-02 14:17:06 -04001988 event = ring_buffer_lock_reserve(buffer, len);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001989 if (event != NULL)
1990 trace_event_setup(event, type, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001991
1992 return event;
1993}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001994
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001995DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1996DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1997static int trace_buffered_event_ref;
1998
1999/**
2000 * trace_buffered_event_enable - enable buffering events
2001 *
2002 * When events are being filtered, it is quicker to use a temporary
2003 * buffer to write the event data into if there's a likely chance
2004 * that it will not be committed. Discarding an event from the ring
2005 * buffer is not as fast as committing one, and is much slower than
2006 * copying the data and committing the copy.
2007 *
2008 * When an event is to be filtered, allocate per-CPU buffers to
2009 * write the event data into; if the event is then filtered and
2010 * discarded it is simply dropped, otherwise the entire data is
2011 * committed in one shot.
2012 */
2013void trace_buffered_event_enable(void)
2014{
2015 struct ring_buffer_event *event;
2016 struct page *page;
2017 int cpu;
2018
2019 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2020
2021 if (trace_buffered_event_ref++)
2022 return;
2023
2024 for_each_tracing_cpu(cpu) {
2025 page = alloc_pages_node(cpu_to_node(cpu),
2026 GFP_KERNEL | __GFP_NORETRY, 0);
2027 if (!page)
2028 goto failed;
2029
2030 event = page_address(page);
2031 memset(event, 0, sizeof(*event));
2032
2033 per_cpu(trace_buffered_event, cpu) = event;
2034
2035 preempt_disable();
2036 if (cpu == smp_processor_id() &&
2037 this_cpu_read(trace_buffered_event) !=
2038 per_cpu(trace_buffered_event, cpu))
2039 WARN_ON_ONCE(1);
2040 preempt_enable();
2041 }
2042
2043 return;
2044 failed:
2045 trace_buffered_event_disable();
2046}
2047
2048static void enable_trace_buffered_event(void *data)
2049{
2050 /* Probably not needed, but do it anyway */
2051 smp_rmb();
2052 this_cpu_dec(trace_buffered_event_cnt);
2053}
2054
2055static void disable_trace_buffered_event(void *data)
2056{
2057 this_cpu_inc(trace_buffered_event_cnt);
2058}
2059
2060/**
2061 * trace_buffered_event_disable - disable buffering events
2062 *
2063 * When a filter is removed, it is faster to not use the buffered
2064 * events, and to commit directly into the ring buffer. Free up
2065 * the temp buffers when there are no more users. This requires
2066 * special synchronization with current events.
2067 */
2068void trace_buffered_event_disable(void)
2069{
2070 int cpu;
2071
2072 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2073
2074 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2075 return;
2076
2077 if (--trace_buffered_event_ref)
2078 return;
2079
2080 preempt_disable();
2081 /* For each CPU, set the buffer as used. */
2082 smp_call_function_many(tracing_buffer_mask,
2083 disable_trace_buffered_event, NULL, 1);
2084 preempt_enable();
2085
2086 /* Wait for all current users to finish */
2087 synchronize_sched();
2088
2089 for_each_tracing_cpu(cpu) {
2090 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2091 per_cpu(trace_buffered_event, cpu) = NULL;
2092 }
2093 /*
2094 * Make sure trace_buffered_event is NULL before clearing
2095 * trace_buffered_event_cnt.
2096 */
2097 smp_wmb();
2098
2099 preempt_disable();
2100 /* Do the work on each cpu */
2101 smp_call_function_many(tracing_buffer_mask,
2102 enable_trace_buffered_event, NULL, 1);
2103 preempt_enable();
2104}
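
/*
 * Example (illustrative sketch): both calls are reference counted and
 * must be made under event_mutex, as the event filter code does when
 * attaching and detaching filters:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... attach the filter ...
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable() once the last filter
 * is removed.
 */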
2105
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002106void
2107__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
2108{
2109 __this_cpu_write(trace_cmdline_save, true);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002110
2111 /* If this is the temp buffer, we need to commit fully */
2112 if (this_cpu_read(trace_buffered_event) == event) {
2113 /* Length is in event->array[0] */
2114 ring_buffer_write(buffer, event->array[0], &event->array[1]);
2115 /* Release the temp buffer */
2116 this_cpu_dec(trace_buffered_event_cnt);
2117 } else
2118 ring_buffer_unlock_commit(buffer, event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002119}
2120
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002121static struct ring_buffer *temp_buffer;
2122
Steven Rostedtef5580d2009-02-27 19:38:04 -05002123struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002124trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002125 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002126 int type, unsigned long len,
2127 unsigned long flags, int pc)
2128{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002129 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002130 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002131
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002132 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002133
2134 if ((trace_file->flags &
2135 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2136 (entry = this_cpu_read(trace_buffered_event))) {
2137 /* Try to use the per cpu buffer first */
2138 val = this_cpu_inc_return(trace_buffered_event_cnt);
2139 if (val == 1) {
2140 trace_event_setup(entry, type, flags, pc);
2141 entry->array[0] = len;
2142 return entry;
2143 }
2144 this_cpu_dec(trace_buffered_event_cnt);
2145 }
2146
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002147 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002148 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002149 /*
2150	 * If tracing is off, but we have triggers enabled,
2151	 * we still need to look at the event data. Use the temp_buffer
2152	 * to store the trace event for the trigger to use. It's recursion
2153	 * safe and will not be recorded anywhere.
2154 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002155 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002156 *current_rb = temp_buffer;
2157 entry = trace_buffer_lock_reserve(*current_rb,
2158 type, len, flags, pc);
2159 }
2160 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002161}
2162EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2163
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002164void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2165 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002166 struct ring_buffer_event *event,
2167 unsigned long flags, int pc,
2168 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002169{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002170 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002171
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002172 /*
2173 * If regs is not set, then skip the following callers:
2174 * trace_buffer_unlock_commit_regs
2175 * event_trigger_unlock_commit
2176 * trace_event_buffer_commit
2177 * trace_event_raw_event_sched_switch
2178 * Note, we can still get here via blktrace, wakeup tracer
2179 * and mmiotrace, but that's ok if they lose a function or
2180	 * two. They are not that meaningful.
2181 */
2182 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002183 ftrace_trace_userstack(buffer, flags, pc);
2184}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002185
Ingo Molnare309b412008-05-12 21:20:51 +02002186void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002187trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002188 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2189 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002190{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002191 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002192 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002193 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002194 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002195
Steven Rostedte77405a2009-09-02 14:17:06 -04002196 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002197 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002198 if (!event)
2199 return;
2200 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002201 entry->ip = ip;
2202 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002203
Tom Zanussif306cc82013-10-24 08:34:17 -05002204 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002205 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002206}
2207
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002208#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002209
2210#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2211struct ftrace_stack {
2212 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2213};
2214
2215static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2216static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2217
Steven Rostedte77405a2009-09-02 14:17:06 -04002218static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002219 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002220 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002221{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002222 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002223 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002224 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002225 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002226 int use_stack;
2227 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002228
2229 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002230 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002231
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002232 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002233	 * Add two, for this function and the call to save_stack_trace().
2234 * If regs is set, then these functions will not be in the way.
2235 */
2236 if (!regs)
2237 trace.skip += 2;
2238
2239 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002240	 * Since events can happen in NMIs, there's no safe way to
2241	 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
2242 * or NMI comes in, it will just have to use the default
2243 * FTRACE_STACK_SIZE.
2244 */
2245 preempt_disable_notrace();
2246
Shan Wei82146522012-11-19 13:21:01 +08002247 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002248 /*
2249 * We don't need any atomic variables, just a barrier.
2250 * If an interrupt comes in, we don't care, because it would
2251 * have exited and put the counter back to what we want.
2252 * We just need a barrier to keep gcc from moving things
2253 * around.
2254 */
2255 barrier();
2256 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002257 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002258 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2259
2260 if (regs)
2261 save_stack_trace_regs(regs, &trace);
2262 else
2263 save_stack_trace(&trace);
2264
2265 if (trace.nr_entries > size)
2266 size = trace.nr_entries;
2267 } else
2268 /* From now on, use_stack is a boolean */
2269 use_stack = 0;
2270
2271 size *= sizeof(unsigned long);
2272
2273 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
2274 sizeof(*entry) + size, flags, pc);
2275 if (!event)
2276 goto out;
2277 entry = ring_buffer_event_data(event);
2278
2279 memset(&entry->caller, 0, size);
2280
2281 if (use_stack)
2282 memcpy(&entry->caller, trace.entries,
2283 trace.nr_entries * sizeof(unsigned long));
2284 else {
2285 trace.max_entries = FTRACE_STACK_ENTRIES;
2286 trace.entries = entry->caller;
2287 if (regs)
2288 save_stack_trace_regs(regs, &trace);
2289 else
2290 save_stack_trace(&trace);
2291 }
2292
2293 entry->size = trace.nr_entries;
2294
Tom Zanussif306cc82013-10-24 08:34:17 -05002295 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002296 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002297
2298 out:
2299 /* Again, don't let gcc optimize things here */
2300 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002301 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002302 preempt_enable_notrace();
2303
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002304}
2305
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002306static inline void ftrace_trace_stack(struct trace_array *tr,
2307 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002308 unsigned long flags,
2309 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002310{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002311 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002312 return;
2313
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002314 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002315}
2316
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002317void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2318 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002319{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002320 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002321}
2322
Steven Rostedt03889382009-12-11 09:48:22 -05002323/**
2324 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002325 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002326 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002327void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002328{
2329 unsigned long flags;
2330
2331 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002332 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002333
2334 local_save_flags(flags);
2335
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002336 /*
2337	 * Skip 3 more, which seems to get us to the caller of
2338 * this function.
2339 */
2340 skip += 3;
2341 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2342 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002343}
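
/*
 * Example usage (illustrative): dump the current backtrace into the
 * trace buffer from a suspect code path:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero skip count is only needed when calling through helper
 * wrappers whose frames should not show up in the trace.
 */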
2344
Steven Rostedt91e86e52010-11-10 12:56:12 +01002345static DEFINE_PER_CPU(int, user_stack_count);
2346
Steven Rostedte77405a2009-09-02 14:17:06 -04002347void
2348ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002349{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002350 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002351 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002352 struct userstack_entry *entry;
2353 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002354
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002355 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002356 return;
2357
Steven Rostedtb6345872010-03-12 20:03:30 -05002358 /*
2359	 * NMIs cannot handle page faults, even with fixups.
2360	 * Saving the user stack can (and often does) fault.
2361 */
2362 if (unlikely(in_nmi()))
2363 return;
2364
Steven Rostedt91e86e52010-11-10 12:56:12 +01002365 /*
2366 * prevent recursion, since the user stack tracing may
2367 * trigger other kernel events.
2368 */
2369 preempt_disable();
2370 if (__this_cpu_read(user_stack_count))
2371 goto out;
2372
2373 __this_cpu_inc(user_stack_count);
2374
Steven Rostedte77405a2009-09-02 14:17:06 -04002375 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002376 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002377 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002378 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002379 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002380
Steven Rostedt48659d32009-09-11 11:36:23 -04002381 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002382 memset(&entry->caller, 0, sizeof(entry->caller));
2383
2384 trace.nr_entries = 0;
2385 trace.max_entries = FTRACE_STACK_ENTRIES;
2386 trace.skip = 0;
2387 trace.entries = entry->caller;
2388
2389 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002390 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002391 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002392
Li Zefan1dbd1952010-12-09 15:47:56 +08002393 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002394 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002395 out:
2396 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002397}
2398
Hannes Eder4fd27352009-02-10 19:44:12 +01002399#ifdef UNUSED
2400static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002401{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002402 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002403}
Hannes Eder4fd27352009-02-10 19:44:12 +01002404#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002405
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002406#endif /* CONFIG_STACKTRACE */
2407
Steven Rostedt07d777f2011-09-22 14:01:55 -04002408/* created for use with alloc_percpu */
2409struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002410 int nesting;
2411 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002412};
2413
2414static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002415
2416/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002417 * This allows for lockless recording. If we're nested too deeply, then
2418 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002419 */
2420static char *get_trace_buf(void)
2421{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002422 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002423
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002424 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002425 return NULL;
2426
Steven Rostedt (VMware)96cf9182017-09-05 11:32:01 -04002427 buffer->nesting++;
2428
2429 /* Interrupts must see nesting incremented before we use the buffer */
2430 barrier();
2431 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002432}
2433
2434static void put_trace_buf(void)
2435{
Steven Rostedt (VMware)96cf9182017-09-05 11:32:01 -04002436 /* Don't let the decrement of nesting leak before this */
2437 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002438 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002439}
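
/*
 * Example (illustrative sketch): every use of the per-cpu buffer is
 * bracketed by the get/put pair, as trace_vbprintk() below does:
 *
 *	buf = get_trace_buf();
 *	if (!buf)
 *		goto out_nobuffer;
 *	len = vscnprintf(buf, TRACE_BUF_SIZE, fmt, args);
 *	...
 *	put_trace_buf();
 *
 * Nesting deeper than the four contexts (process, softirq, irq, NMI)
 * makes get_trace_buf() return NULL, which callers must tolerate.
 */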
2440
2441static int alloc_percpu_trace_buffer(void)
2442{
2443 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002444
2445 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002446 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2447 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002448
2449 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002450 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002451}
2452
Steven Rostedt81698832012-10-11 10:15:05 -04002453static int buffers_allocated;
2454
Steven Rostedt07d777f2011-09-22 14:01:55 -04002455void trace_printk_init_buffers(void)
2456{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002457 if (buffers_allocated)
2458 return;
2459
2460 if (alloc_percpu_trace_buffer())
2461 return;
2462
Steven Rostedt2184db42014-05-28 13:14:40 -04002463 /* trace_printk() is for debug use only. Don't use it in production. */
2464
Joe Perchesa395d6a2016-03-22 14:28:09 -07002465 pr_warn("\n");
2466 pr_warn("**********************************************************\n");
2467 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2468 pr_warn("** **\n");
2469 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2470 pr_warn("** **\n");
2471 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2472 pr_warn("** unsafe for production use. **\n");
2473 pr_warn("** **\n");
2474 pr_warn("** If you see this message and you are not debugging **\n");
2475 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2476 pr_warn("** **\n");
2477 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2478 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002479
Steven Rostedtb382ede62012-10-10 21:44:34 -04002480 /* Expand the buffers to set size */
2481 tracing_update_buffers();
2482
Steven Rostedt07d777f2011-09-22 14:01:55 -04002483 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002484
2485 /*
2486 * trace_printk_init_buffers() can be called by modules.
2487 * If that happens, then we need to start cmdline recording
2488 * directly here. If the global_trace.buffer is already
2489 * allocated here, then this was called by module code.
2490 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002492 tracing_start_cmdline_record();
2493}
2494
2495void trace_printk_start_comm(void)
2496{
2497 /* Start tracing comms if trace printk is set */
2498 if (!buffers_allocated)
2499 return;
2500 tracing_start_cmdline_record();
2501}
2502
2503static void trace_printk_start_stop_comm(int enabled)
2504{
2505 if (!buffers_allocated)
2506 return;
2507
2508 if (enabled)
2509 tracing_start_cmdline_record();
2510 else
2511 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002512}
2513
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002514/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002515 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002516 *
2517 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002518int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002519{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002520 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002521 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002522 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002523 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002524 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002525 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002526 char *tbuffer;
2527 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002528
2529 if (unlikely(tracing_selftest_running || tracing_disabled))
2530 return 0;
2531
2532 /* Don't pollute graph traces with trace_vprintk internals */
2533 pause_graph_tracing();
2534
2535 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002536 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002537
Steven Rostedt07d777f2011-09-22 14:01:55 -04002538 tbuffer = get_trace_buf();
2539 if (!tbuffer) {
2540 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002541 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002542 }
2543
2544 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2545
2546 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002547 goto out;
2548
Steven Rostedt07d777f2011-09-22 14:01:55 -04002549 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002550 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002551 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002552 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2553 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002554 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002555 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002556 entry = ring_buffer_event_data(event);
2557 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002558 entry->fmt = fmt;
2559
Steven Rostedt07d777f2011-09-22 14:01:55 -04002560 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002561 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002562 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002563 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002564 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002565
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002566out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002567 put_trace_buf();
2568
2569out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002570 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002571 unpause_graph_tracing();
2572
2573 return len;
2574}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002575EXPORT_SYMBOL_GPL(trace_vbprintk);
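
/*
 * Example (illustrative): trace_vbprintk() is the backend of the
 * trace_printk() macro, so a typical debug call looks like:
 *
 *	trace_printk("page=%p order=%d\n", page, order);
 *
 * Only the format pointer and the binary arguments are recorded,
 * which is why this path is far cheaper than printk().
 */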
2576
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002577__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002578static int
2579__trace_array_vprintk(struct ring_buffer *buffer,
2580 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002581{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002582 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002583 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002584 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002585 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002586 unsigned long flags;
2587 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002588
2589 if (tracing_disabled || tracing_selftest_running)
2590 return 0;
2591
Steven Rostedt07d777f2011-09-22 14:01:55 -04002592 /* Don't pollute graph traces with trace_vprintk internals */
2593 pause_graph_tracing();
2594
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002595 pc = preempt_count();
2596 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002597
2599 tbuffer = get_trace_buf();
2600 if (!tbuffer) {
2601 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002602 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002603 }
2604
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002605 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002606
Steven Rostedt07d777f2011-09-22 14:01:55 -04002607 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002608 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002609 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002610 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002611 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002612 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002613 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002614 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002615
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002616 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002617 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002618 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002619 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002620 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002621
2622out:
2623 put_trace_buf();
2624
2625out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002626 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002627 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002628
2629 return len;
2630}
Steven Rostedt659372d2009-09-03 19:11:07 -04002631
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002632__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002633int trace_array_vprintk(struct trace_array *tr,
2634 unsigned long ip, const char *fmt, va_list args)
2635{
2636 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2637}
2638
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002639__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002640int trace_array_printk(struct trace_array *tr,
2641 unsigned long ip, const char *fmt, ...)
2642{
2643 int ret;
2644 va_list ap;
2645
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002646 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002647 return 0;
2648
2649 va_start(ap, fmt);
2650 ret = trace_array_vprintk(tr, ip, fmt, ap);
2651 va_end(ap);
2652 return ret;
2653}
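
/*
 * Illustrative sketch (not part of the original flow; "id" and
 * "delta_us" are hypothetical): a caller holding a trace_array
 * pointer, e.g. for a tracing instance, can emit printk-style
 * records into that instance with:
 *
 *	trace_array_printk(tr, _THIS_IP_, "request %d took %lu us\n",
 *			   id, delta_us);
 *
 * The record is dropped unless the "printk" trace option is set in
 * the top-level (global) trace flags, as checked above.
 */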

__printf(3, 4)
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
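
/*
 * trace_vprintk() is the va_list entry point into the global buffer;
 * it is what the trace_printk() machinery ends up calling. A minimal
 * wrapper (sketch only, "my_trace" is a hypothetical name) would be:
 *
 *	static __printf(1, 2) int my_trace(const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vprintk(_THIS_IP_, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */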

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all the CPUs; peek at that one CPU directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
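
/*
 * Worked example: with per-cpu buffers cpu0 = [ts 5, ts 9] and
 * cpu1 = [ts 3], successive __find_next_entry() calls yield the
 * entries at ts 3 (cpu1), ts 5 (cpu0), then ts 9 (cpu0). Each
 * per-cpu buffer is already time ordered, so each call performs
 * one step of a k-way merge across CPUs.
 */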
2767
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002768/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002769struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2770 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002771{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002772 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002773}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002774
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002775/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002776void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002777{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002778 iter->ent = __find_next_entry(iter, &iter->cpu,
2779 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002780
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002781 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002782 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002783
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002784 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002785}
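
/*
 * Typical consumer loop (sketch only, assuming "iter" was set up the
 * way __tracing_open() below sets it up and the read lock is held):
 *
 *	while (trace_find_next_entry_inc(iter)) {
 *		// iter->ent, iter->cpu and iter->ts now describe the
 *		// oldest remaining entry across all traced CPUs
 *		print_trace_line(iter);
 *	}
 */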

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This shows up as entries whose timestamps
	 * precede the start of the buffer; step over those stale ones.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
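
/*
 * The skipped count recorded above is what get_total_entries() below
 * subtracts, so the "entries-in-buffer" header line does not count the
 * stale pre-reset events that were just stepped over.
 */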

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace; the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
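
/*
 * s_start()/s_next()/s_stop() implement the seq_file iterator
 * contract; for each read() the seq_file core does roughly (sketch):
 *
 *	p = s_start(m, &pos);
 *	while (p && !seq_has_overflowed(m)) {
 *		s_show(m, p);		// s_show() is defined below
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */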

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
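
/*
 * Worked example: if cpu0 wrote 14 events of which 4 were overwritten
 * (overrun) and cpu1 wrote 3 with no overruns, this reports
 * entries = 13 (still readable) and total = 17 (ever written),
 * assuming neither cpu recorded latency-trace skipped entries.
 */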

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |        |      |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                                      _-----=> irqs-off\n");
	seq_puts(m, "#                                     / _----=> need-resched\n");
	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
	seq_puts(m, "#                                    ||| /     delay\n");
	seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |        |      |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
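
/*
 * Summary of the dispatch order above: a tracer's own print_line()
 * hook wins; then the printk-msg-only shortcuts for bputs/bprint/print
 * events; then the user-selected bin, hex or raw encodings; and
 * finally the default human-readable format.
 */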

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				if (trace_flags & TRACE_ITER_TGID)
					print_func_help_header_irq_tgid(iter->trace_buffer, m);
				else
					print_func_help_header_irq(iter->trace_buffer, m);
			else
				if (trace_flags & TRACE_ITER_TGID)
					print_func_help_header_tgid(iter->trace_buffer, m);
				else
					print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be a '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be a '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if the seq_file write succeeded,
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
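
/*
 * Illustration (sketch, mirroring trace_create_cpu_file()): the
 * per-cpu files reuse i_cdev to carry "cpu + 1", so that a zero
 * i_cdev can still mean "all CPUs":
 *
 *	inode->i_cdev = (void *)(cpu + 1);	// at file-creation time
 *	...
 *	cpu = (long)inode->i_cdev - 1;		// recovered here
 */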

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
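
/*
 * Example: if the registered list is  function -> wakeup -> nop  and,
 * hypothetically, only "nop" sets ->allow_instances, then for an
 * instance array get_tracer_for_array(tr, trace_types) skips straight
 * to "nop", while the top-level (TRACE_ARRAY_FL_GLOBAL) array gets
 * "function".
 */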
3707
Ingo Molnare309b412008-05-12 21:20:51 +02003708static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003709t_next(struct seq_file *m, void *v, loff_t *pos)
3710{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003711 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003712 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003713
3714 (*pos)++;
3715
3716 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003717 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003718
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003719 return t;
3720}
3721
3722static void *t_start(struct seq_file *m, loff_t *pos)
3723{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003724 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003725 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003726 loff_t l = 0;
3727
3728 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003729
3730 t = get_tracer_for_array(tr, trace_types);
3731 for (; t && l < *pos; t = t_next(m, t, &l))
3732 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003733
3734 return t;
3735}
3736
3737static void t_stop(struct seq_file *m, void *p)
3738{
3739 mutex_unlock(&trace_types_lock);
3740}
3741
3742static int t_show(struct seq_file *m, void *v)
3743{
3744 struct tracer *t = v;
3745
3746 if (!t)
3747 return 0;
3748
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003749 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003750 if (t->next)
3751 seq_putc(m, ' ');
3752 else
3753 seq_putc(m, '\n');
3754
3755 return 0;
3756}
3757
James Morris88e9d342009-09-22 16:43:43 -07003758static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003759 .start = t_start,
3760 .next = t_next,
3761 .stop = t_stop,
3762 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003763};
3764
3765static int show_traces_open(struct inode *inode, struct file *file)
3766{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003767 struct trace_array *tr = inode->i_private;
3768 struct seq_file *m;
3769 int ret;
3770
Steven Rostedt60a11772008-05-12 21:20:44 +02003771 if (tracing_disabled)
3772 return -ENODEV;
3773
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003774 ret = seq_open(file, &show_traces_seq_ops);
3775 if (ret)
3776 return ret;
3777
3778 m = file->private_data;
3779 m->private = tr;
3780
3781 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003782}
3783
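/*
 * Illustrative example: the seq_file operations above back the
 * "available_tracers" file. Reading it lists, space separated, every
 * tracer this trace array is allowed to use (the exact set depends on
 * the kernel config; the output below is only an example):
 *
 *	# cat /sys/kernel/debug/tracing/available_tracers
 *	blk function_graph wakeup function nop
 */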
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003784static ssize_t
3785tracing_write_stub(struct file *filp, const char __user *ubuf,
3786 size_t count, loff_t *ppos)
3787{
3788 return count;
3789}
3790
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003791loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003792{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003793 int ret;
3794
Slava Pestov364829b2010-11-24 15:13:16 -08003795 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003796 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003797 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003798 file->f_pos = ret = 0;
3799
3800 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003801}
3802
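/*
 * Behaviour sketch (illustrative userspace snippet, not kernel code):
 * seeking is only meaningful when the file was opened for reading,
 * since only then is there a seq_file iterator to position.
 *
 *	int fd = open("trace", O_WRONLY);	// write-only open
 *	off_t pos = lseek(fd, 100, SEEK_SET);	// returns 0, f_pos stays 0
 */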
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003803static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003804 .open = tracing_open,
3805 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003806 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003807 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003808 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003809};
3810
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003811static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003812 .open = show_traces_open,
3813 .read = seq_read,
3814 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003815 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003816};
3817
3818static ssize_t
3819tracing_cpumask_read(struct file *filp, char __user *ubuf,
3820 size_t count, loff_t *ppos)
3821{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003822 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Dud760f902017-11-30 11:39:43 +08003823 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003824 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003825
Changbin Dud760f902017-11-30 11:39:43 +08003826 len = snprintf(NULL, 0, "%*pb\n",
3827 cpumask_pr_args(tr->tracing_cpumask)) + 1;
3828 mask_str = kmalloc(len, GFP_KERNEL);
3829 if (!mask_str)
3830 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003831
Changbin Dud760f902017-11-30 11:39:43 +08003832 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08003833 cpumask_pr_args(tr->tracing_cpumask));
3834 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003835 count = -EINVAL;
3836 goto out_err;
3837 }
Changbin Dud760f902017-11-30 11:39:43 +08003838 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003839
3840out_err:
Changbin Dud760f902017-11-30 11:39:43 +08003841 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003842
3843 return count;
3844}
3845
3846static ssize_t
3847tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3848 size_t count, loff_t *ppos)
3849{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003850 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303851 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003852 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303853
3854 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3855 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003856
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303857 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003858 if (err)
3859 goto err_unlock;
3860
Steven Rostedta5e25882008-12-02 15:34:05 -05003861 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003862 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003863 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003864 /*
3865 * Increase/decrease the disabled counter if we are
3866 * about to flip a bit in the cpumask:
3867 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003868 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303869 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003870 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3871 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003872 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003873 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303874 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003875 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3876 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003877 }
3878 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003879 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003880 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003881
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003882 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303883 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003884
Ingo Molnarc7078de2008-05-12 21:20:52 +02003885 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003886
3887err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003888 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003889
3890 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003891}
3892
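/*
 * Usage sketch (illustrative): tracing_cpumask reads and writes a
 * standard cpumask hex string. On a 4-CPU machine, for example:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	f
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask	# CPUs 0-1 only
 *
 * The write path above stops ring-buffer recording on each CPU that is
 * being removed from the mask before the new mask is copied in, so a
 * bit flip never tears an event.
 */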
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003893static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003894 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003895 .read = tracing_cpumask_read,
3896 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003897 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003898 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003899};
3900
Li Zefanfdb372e2009-12-08 11:15:59 +08003901static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003902{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003903 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003904 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003905 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003906 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003907
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003908 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003909 tracer_flags = tr->current_trace->flags->val;
3910 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003911
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003913 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003914 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003915 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003916 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003917 }
3918
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003919 for (i = 0; trace_opts[i].name; i++) {
3920 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003921 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003922 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003923 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003924 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003925 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003926
Li Zefanfdb372e2009-12-08 11:15:59 +08003927 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003928}
3929
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003930static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003931 struct tracer_flags *tracer_flags,
3932 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003933{
Chunyu Hud39cdd22016-03-08 21:37:01 +08003934 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003935 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003936
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003937 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003938 if (ret)
3939 return ret;
3940
3941 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003942 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003943 else
Zhaolei77708412009-08-07 18:53:21 +08003944 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003945 return 0;
3946}
3947
Li Zefan8d18eaa2009-12-08 11:17:06 +08003948/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003949static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003950{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003951 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003952 struct tracer_flags *tracer_flags = trace->flags;
3953 struct tracer_opt *opts = NULL;
3954 int i;
3955
3956 for (i = 0; tracer_flags->opts[i].name; i++) {
3957 opts = &tracer_flags->opts[i];
3958
3959 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003960 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003961 }
3962
3963 return -EINVAL;
3964}
3965
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003966/* Some tracers require overwrite to stay enabled */
3967int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3968{
3969 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3970 return -1;
3971
3972 return 0;
3973}
3974
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003975int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003976{
3977 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003978 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003979 return 0;
3980
3981 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003982 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003983 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003984 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003985
3986 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003987 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003988 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003989 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003990
3991 if (mask == TRACE_ITER_RECORD_CMD)
3992 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003993
Steven Rostedtc37775d2016-04-13 16:59:18 -04003994 if (mask == TRACE_ITER_EVENT_FORK)
3995 trace_event_follow_fork(tr, enabled);
3996
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003997 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003998 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003999#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004000 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004001#endif
4002 }
Steven Rostedt81698832012-10-11 10:15:05 -04004003
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004004 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004005 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004006 trace_printk_control(enabled);
4007 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004008
4009 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004010}
4011
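/*
 * Illustrative example: set_tracer_flag() is what ultimately services
 * writes to the trace_options file and the options/ directory:
 *
 *	# echo overwrite > trace_options	# set TRACE_ITER_OVERWRITE
 *	# echo nooverwrite > trace_options	# clear it again
 *
 * The current tracer may veto a change through ->flag_changed(); for
 * instance, tracers that depend on overwrite mode reject clearing it
 * via trace_keep_overwrite() above.
 */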
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004012static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004013{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004014 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004015 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004016 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004017 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004018 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004019
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004020 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004021
Li Zefan8d18eaa2009-12-08 11:17:06 +08004022 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004023 neg = 1;
4024 cmp += 2;
4025 }
4026
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004027 mutex_lock(&trace_types_lock);
4028
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004029 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08004030 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004031 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004032 break;
4033 }
4034 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004035
4036 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004037 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004038 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004039
4040 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004041
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004042 /*
4043 * If the first trailing whitespace is replaced with '\0' by strstrip,
4044 * turn it back into a space.
4045 */
4046 if (orig_len > strlen(option))
4047 option[strlen(option)] = ' ';
4048
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004049 return ret;
4050}
4051
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004052static void __init apply_trace_boot_options(void)
4053{
4054 char *buf = trace_boot_options_buf;
4055 char *option;
4056
4057 while (true) {
4058 option = strsep(&buf, ",");
4059
4060 if (!option)
4061 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004062
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004063 if (*option)
4064 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004065
4066 /* Put back the comma to allow this to be called again */
4067 if (buf)
4068 *(buf - 1) = ',';
4069 }
4070}
4071
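/*
 * Example (command line assumed for illustration): the buffer walked
 * here is filled from the "trace_options=" boot parameter, so booting
 * with something like
 *
 *	trace_options=sym-offset,noirq-info
 *
 * feeds each comma-separated token to trace_set_options() against the
 * global trace array, exactly as if it had been echoed into the
 * trace_options file after boot.
 */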
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004072static ssize_t
4073tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4074 size_t cnt, loff_t *ppos)
4075{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004076 struct seq_file *m = filp->private_data;
4077 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004078 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004079 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004080
4081 if (cnt >= sizeof(buf))
4082 return -EINVAL;
4083
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004084 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004085 return -EFAULT;
4086
Steven Rostedta8dd2172013-01-09 20:54:17 -05004087 buf[cnt] = 0;
4088
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004089 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004090 if (ret < 0)
4091 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004092
Jiri Olsacf8517c2009-10-23 19:36:16 -04004093 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004094
4095 return cnt;
4096}
4097
Li Zefanfdb372e2009-12-08 11:15:59 +08004098static int tracing_trace_options_open(struct inode *inode, struct file *file)
4099{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004100 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004101 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004102
Li Zefanfdb372e2009-12-08 11:15:59 +08004103 if (tracing_disabled)
4104 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004105
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004106 if (trace_array_get(tr) < 0)
4107 return -ENODEV;
4108
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004109 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4110 if (ret < 0)
4111 trace_array_put(tr);
4112
4113 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004114}
4115
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004116static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004117 .open = tracing_trace_options_open,
4118 .read = seq_read,
4119 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004120 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004121 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004122};
4123
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004124static const char readme_msg[] =
4125 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004126 "# echo 0 > tracing_on : quick way to disable tracing\n"
4127 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4128 " Important files:\n"
4129 " trace\t\t\t- The static contents of the buffer\n"
4130 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4131 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4132 " current_tracer\t- function and latency tracers\n"
4133 " available_tracers\t- list of configured tracers for current_tracer\n"
4134 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4135 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4136 " trace_clock\t\t-change the clock used to order events\n"
4137 " local: Per cpu clock but may not be synced across CPUs\n"
4138 " global: Synced across CPUs but slows tracing down.\n"
4139 " counter: Not a clock, but just an increment\n"
4140 " uptime: Jiffy counter from time of boot\n"
4141 " perf: Same clock that perf events use\n"
4142#ifdef CONFIG_X86_64
4143 " x86-tsc: TSC cycle counter\n"
4144#endif
4145 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4146 " tracing_cpumask\t- Limit which CPUs to trace\n"
4147 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4148 "\t\t\t Remove sub-buffer with rmdir\n"
4149 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004150	"\t\t\t Disable an option by prefixing 'no' to the\n"
4151 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004152	" saved_cmdlines_size\t- echo a number in here to set how many comm-pid pairs are stored\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004153#ifdef CONFIG_DYNAMIC_FTRACE
4154 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004155 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4156 "\t\t\t functions\n"
4157 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4158 "\t modules: Can select a group via module\n"
4159 "\t Format: :mod:<module-name>\n"
4160 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4161 "\t triggers: a command to perform when function is hit\n"
4162 "\t Format: <function>:<trigger>[:count]\n"
4163 "\t trigger: traceon, traceoff\n"
4164 "\t\t enable_event:<system>:<event>\n"
4165 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004166#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004167 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004168#endif
4169#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004170 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004171#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004172 "\t\t dump\n"
4173 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004174 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4175 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4176 "\t The first one will disable tracing every time do_fault is hit\n"
4177 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4178 "\t The first time do trap is hit and it disables tracing, the\n"
4179 "\t counter will decrement to 2. If tracing is already disabled,\n"
4180 "\t the counter will not decrement. It only decrements when the\n"
4181 "\t trigger did work\n"
4182 "\t To remove trigger without count:\n"
4183 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4184 "\t To remove trigger with a count:\n"
4185 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004186 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004187 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4188 "\t modules: Can select a group via module command :mod:\n"
4189 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004190#endif /* CONFIG_DYNAMIC_FTRACE */
4191#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004192 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4193 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004194#endif
4195#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4196 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004197 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004198 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4199#endif
4200#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004201 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4202 "\t\t\t snapshot buffer. Read the contents for more\n"
4203 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004204#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004205#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004206 " stack_trace\t\t- Shows the max stack trace when active\n"
4207 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004208 "\t\t\t Write into this file to reset the max size (trigger a\n"
4209 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004210#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004211 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4212 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004213#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004214#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004215#ifdef CONFIG_KPROBE_EVENT
4216 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4217 "\t\t\t Write into this file to define/undefine new trace events.\n"
4218#endif
4219#ifdef CONFIG_UPROBE_EVENT
4220 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4221 "\t\t\t Write into this file to define/undefine new trace events.\n"
4222#endif
4223#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4224 "\t accepts: event-definitions (one definition per line)\n"
4225 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4226 "\t -:[<group>/]<event>\n"
4227#ifdef CONFIG_KPROBE_EVENT
4228 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4229#endif
4230#ifdef CONFIG_UPROBE_EVENT
4231 "\t place: <path>:<offset>\n"
4232#endif
4233 "\t args: <name>=fetcharg[:type]\n"
4234 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4235 "\t $stack<index>, $stack, $retval, $comm\n"
4236 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4237 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4238#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004239 " events/\t\t- Directory containing all trace event subsystems:\n"
4240 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4241 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004242 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4243 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004244 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004245 " events/<system>/<event>/\t- Directory containing control files for\n"
4246 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004247 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4248 " filter\t\t- If set, only events passing filter are traced\n"
4249 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004250 "\t Format: <trigger>[:count][if <filter>]\n"
4251 "\t trigger: traceon, traceoff\n"
4252 "\t enable_event:<system>:<event>\n"
4253 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004254#ifdef CONFIG_HIST_TRIGGERS
4255 "\t enable_hist:<system>:<event>\n"
4256 "\t disable_hist:<system>:<event>\n"
4257#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004258#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004259 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004260#endif
4261#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004262 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004263#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004264#ifdef CONFIG_HIST_TRIGGERS
4265 "\t\t hist (see below)\n"
4266#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004267 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4268 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4269 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4270 "\t events/block/block_unplug/trigger\n"
4271 "\t The first disables tracing every time block_unplug is hit.\n"
4272 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4273 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4274 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4275 "\t Like function triggers, the counter is only decremented if it\n"
4276 "\t enabled or disabled tracing.\n"
4277 "\t To remove a trigger without a count:\n"
4278 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4279 "\t To remove a trigger with a count:\n"
4280 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4281 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004282#ifdef CONFIG_HIST_TRIGGERS
4283 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004284 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004285 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004286 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004287 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004288 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004289 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004290 "\t [if <filter>]\n\n"
4291 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004292 "\t table using the key(s) and value(s) named, and the value of a\n"
4293 "\t sum called 'hitcount' is incremented. Keys and values\n"
4294 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004295 "\t can be any field, or the special string 'stacktrace'.\n"
4296 "\t Compound keys consisting of up to two fields can be specified\n"
4297 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4298 "\t fields. Sort keys consisting of up to two fields can be\n"
4299 "\t specified using the 'sort' keyword. The sort direction can\n"
4300 "\t be modified by appending '.descending' or '.ascending' to a\n"
4301 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004302 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4303 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4304 "\t its histogram data will be shared with other triggers of the\n"
4305 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004306 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004307 "\t table in its entirety to stdout. If there are multiple hist\n"
4308 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004309 "\t trigger in the output. The table displayed for a named\n"
4310 "\t trigger will be the same as any other instance having the\n"
4311 "\t same name. The default format used to display a given field\n"
4312 "\t can be modified by appending any of the following modifiers\n"
4313 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004314 "\t .hex display a number as a hex value\n"
4315 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004316 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004317 "\t .execname display a common_pid as a program name\n"
4318 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004319 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004320 "\t The 'pause' parameter can be used to pause an existing hist\n"
4321 "\t trigger or to start a hist trigger but not log any events\n"
4322 "\t until told to do so. 'continue' can be used to start or\n"
4323 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004324 "\t The 'clear' parameter will clear the contents of a running\n"
4325 "\t hist trigger and leave its current paused/active state\n"
4326 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004327 "\t The enable_hist and disable_hist triggers can be used to\n"
4328 "\t have one event conditionally start and stop another event's\n"
4329 "\t already-attached hist trigger. The syntax is analagous to\n"
4330 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004331#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004332;
4333
4334static ssize_t
4335tracing_readme_read(struct file *filp, char __user *ubuf,
4336 size_t cnt, loff_t *ppos)
4337{
4338 return simple_read_from_buffer(ubuf, cnt, ppos,
4339 readme_msg, strlen(readme_msg));
4340}
4341
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004342static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004343 .open = tracing_open_generic,
4344 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004345 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004346};
4347
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004348static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004349{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004350 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004351
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004352 if (*pos || m->count)
4353 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004354
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004355 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004356
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004357 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4358 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004359 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004360 continue;
4361
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004362 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004363 }
4364
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004365 return NULL;
4366}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004367
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004368static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4369{
4370 void *v;
4371 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004372
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004373 preempt_disable();
4374 arch_spin_lock(&trace_cmdline_lock);
4375
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004376 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004377 while (l <= *pos) {
4378 v = saved_cmdlines_next(m, v, &l);
4379 if (!v)
4380 return NULL;
4381 }
4382
4383 return v;
4384}
4385
4386static void saved_cmdlines_stop(struct seq_file *m, void *v)
4387{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004388 arch_spin_unlock(&trace_cmdline_lock);
4389 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004390}
4391
4392static int saved_cmdlines_show(struct seq_file *m, void *v)
4393{
4394 char buf[TASK_COMM_LEN];
4395 unsigned int *pid = v;
4396
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004397 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004398 seq_printf(m, "%d %s\n", *pid, buf);
4399 return 0;
4400}
4401
4402static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4403 .start = saved_cmdlines_start,
4404 .next = saved_cmdlines_next,
4405 .stop = saved_cmdlines_stop,
4406 .show = saved_cmdlines_show,
4407};
4408
4409static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4410{
4411 if (tracing_disabled)
4412 return -ENODEV;
4413
4414 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004415}
4416
4417static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004418 .open = tracing_saved_cmdlines_open,
4419 .read = seq_read,
4420 .llseek = seq_lseek,
4421 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004422};
4423
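/*
 * Illustrative read of the resulting "saved_cmdlines" file (the values
 * shown are examples only):
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 init
 *	1234 bash
 *
 * Each line is "<pid> <comm>", produced by saved_cmdlines_show() from
 * the pid-to-cmdline map recorded while tracing.
 */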
4424static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004425tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4426 size_t cnt, loff_t *ppos)
4427{
4428 char buf[64];
4429 int r;
Adrian Salidoa06ea262017-04-18 11:44:33 -07004430 unsigned int n;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004431
Adrian Salidoa06ea262017-04-18 11:44:33 -07004432 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004433 arch_spin_lock(&trace_cmdline_lock);
Adrian Salidoa06ea262017-04-18 11:44:33 -07004434 n = savedcmd->cmdline_num;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004435 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salidoa06ea262017-04-18 11:44:33 -07004436 preempt_enable();
4437
4438 r = scnprintf(buf, sizeof(buf), "%u\n", n);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004439
4440 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4441}
4442
4443static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4444{
4445 kfree(s->saved_cmdlines);
4446 kfree(s->map_cmdline_to_pid);
Adrian Salidoa06ea262017-04-18 11:44:33 -07004447 kfree(s->map_cmdline_to_tgid);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004448 kfree(s);
4449}
4450
4451static int tracing_resize_saved_cmdlines(unsigned int val)
4452{
4453 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4454
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004455 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004456 if (!s)
4457 return -ENOMEM;
4458
4459 if (allocate_cmdlines_buffer(val, s) < 0) {
4460 kfree(s);
4461 return -ENOMEM;
4462 }
4463
Adrian Salidoa06ea262017-04-18 11:44:33 -07004464 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004465 arch_spin_lock(&trace_cmdline_lock);
4466 savedcmd_temp = savedcmd;
4467 savedcmd = s;
4468 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salidoa06ea262017-04-18 11:44:33 -07004469 preempt_enable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004470 free_saved_cmdlines_buffer(savedcmd_temp);
4471
4472 return 0;
4473}
4474
4475static ssize_t
4476tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4477 size_t cnt, loff_t *ppos)
4478{
4479 unsigned long val;
4480 int ret;
4481
4482 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4483 if (ret)
4484 return ret;
4485
4486	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4487 if (!val || val > PID_MAX_DEFAULT)
4488 return -EINVAL;
4489
4490 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4491 if (ret < 0)
4492 return ret;
4493
4494 *ppos += cnt;
4495
4496 return cnt;
4497}
4498
4499static const struct file_operations tracing_saved_cmdlines_size_fops = {
4500 .open = tracing_open_generic,
4501 .read = tracing_saved_cmdlines_size_read,
4502 .write = tracing_saved_cmdlines_size_write,
4503};
4504
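/*
 * Usage sketch (illustrative): the cmdline map can be resized at
 * runtime, within the 1..PID_MAX_DEFAULT bounds checked above:
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *
 * The resize allocates a complete new saved_cmdlines_buffer and swaps
 * it in under trace_cmdline_lock, so concurrent lookups always see a
 * consistent buffer.
 */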
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004505#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4506static union trace_enum_map_item *
4507update_enum_map(union trace_enum_map_item *ptr)
4508{
4509 if (!ptr->map.enum_string) {
4510 if (ptr->tail.next) {
4511 ptr = ptr->tail.next;
4512 /* Set ptr to the next real item (skip head) */
4513 ptr++;
4514 } else
4515 return NULL;
4516 }
4517 return ptr;
4518}
4519
4520static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4521{
4522 union trace_enum_map_item *ptr = v;
4523
4524 /*
4525 * Paranoid! If ptr points to end, we don't want to increment past it.
4526 * This really should never happen.
4527 */
4528 ptr = update_enum_map(ptr);
4529 if (WARN_ON_ONCE(!ptr))
4530 return NULL;
4531
4532 ptr++;
4533
4534 (*pos)++;
4535
4536 ptr = update_enum_map(ptr);
4537
4538 return ptr;
4539}
4540
4541static void *enum_map_start(struct seq_file *m, loff_t *pos)
4542{
4543 union trace_enum_map_item *v;
4544 loff_t l = 0;
4545
4546 mutex_lock(&trace_enum_mutex);
4547
4548 v = trace_enum_maps;
4549 if (v)
4550 v++;
4551
4552 while (v && l < *pos) {
4553 v = enum_map_next(m, v, &l);
4554 }
4555
4556 return v;
4557}
4558
4559static void enum_map_stop(struct seq_file *m, void *v)
4560{
4561 mutex_unlock(&trace_enum_mutex);
4562}
4563
4564static int enum_map_show(struct seq_file *m, void *v)
4565{
4566 union trace_enum_map_item *ptr = v;
4567
4568 seq_printf(m, "%s %ld (%s)\n",
4569 ptr->map.enum_string, ptr->map.enum_value,
4570 ptr->map.system);
4571
4572 return 0;
4573}
4574
4575static const struct seq_operations tracing_enum_map_seq_ops = {
4576 .start = enum_map_start,
4577 .next = enum_map_next,
4578 .stop = enum_map_stop,
4579 .show = enum_map_show,
4580};
4581
4582static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4583{
4584 if (tracing_disabled)
4585 return -ENODEV;
4586
4587 return seq_open(filp, &tracing_enum_map_seq_ops);
4588}
4589
4590static const struct file_operations tracing_enum_map_fops = {
4591 .open = tracing_enum_map_open,
4592 .read = seq_read,
4593 .llseek = seq_lseek,
4594 .release = seq_release,
4595};
4596
4597static inline union trace_enum_map_item *
4598trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4599{
4600 /* Return tail of array given the head */
4601 return ptr + ptr->head.length + 1;
4602}
4603
4604static void
4605trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4606 int len)
4607{
4608 struct trace_enum_map **stop;
4609 struct trace_enum_map **map;
4610 union trace_enum_map_item *map_array;
4611 union trace_enum_map_item *ptr;
4612
4613 stop = start + len;
4614
4615 /*
4616 * The trace_enum_maps contains the map plus a head and tail item,
4617 * where the head holds the module and length of array, and the
4618 * tail holds a pointer to the next list.
4619 */
4620 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4621 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004622 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004623 return;
4624 }
4625
4626 mutex_lock(&trace_enum_mutex);
4627
4628 if (!trace_enum_maps)
4629 trace_enum_maps = map_array;
4630 else {
4631 ptr = trace_enum_maps;
4632 for (;;) {
4633 ptr = trace_enum_jmp_to_tail(ptr);
4634 if (!ptr->tail.next)
4635 break;
4636 ptr = ptr->tail.next;
4637
4638 }
4639 ptr->tail.next = map_array;
4640 }
4641 map_array->head.mod = mod;
4642 map_array->head.length = len;
4643 map_array++;
4644
4645 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4646 map_array->map = **map;
4647 map_array++;
4648 }
4649 memset(map_array, 0, sizeof(*map_array));
4650
4651 mutex_unlock(&trace_enum_mutex);
4652}
4653
4654static void trace_create_enum_file(struct dentry *d_tracer)
4655{
4656 trace_create_file("enum_map", 0444, d_tracer,
4657 NULL, &tracing_enum_map_fops);
4658}
4659
4660#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4661static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4662static inline void trace_insert_enum_map_file(struct module *mod,
4663 struct trace_enum_map **start, int len) { }
4664#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4665
4666static void trace_insert_enum_map(struct module *mod,
4667 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004668{
4669 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004670
4671 if (len <= 0)
4672 return;
4673
4674 map = start;
4675
4676 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004677
4678 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004679}
4680
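/*
 * Illustrative output: with CONFIG_TRACE_ENUM_MAP_FILE, every
 * registered enum appears in the "enum_map" file, one
 * "<name> <value> (<system>)" line each (the values below are examples
 * only; they vary by config and architecture):
 *
 *	# cat /sys/kernel/debug/tracing/enum_map
 *	HI_SOFTIRQ 0 (irq)
 *	ZONE_NORMAL 1 (mm)
 */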
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004681static ssize_t
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004682tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4683 size_t cnt, loff_t *ppos)
4684{
4685 char *file_buf;
4686 char *buf;
4687 int len = 0;
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004688 int i;
Adrian Salidoa06ea262017-04-18 11:44:33 -07004689 int *pids;
4690 int n = 0;
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004691
Adrian Salidoa06ea262017-04-18 11:44:33 -07004692 preempt_disable();
4693 arch_spin_lock(&trace_cmdline_lock);
4694
4695	pids = kmalloc_array(savedcmd->cmdline_num, 2 * sizeof(int),
4696			     GFP_ATOMIC); /* lock held, must not sleep */
4696 if (!pids) {
4697 arch_spin_unlock(&trace_cmdline_lock);
4698 preempt_enable();
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004699 return -ENOMEM;
Adrian Salidoa06ea262017-04-18 11:44:33 -07004700 }
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004701
Adrian Salidoa06ea262017-04-18 11:44:33 -07004702 for (i = 0; i < savedcmd->cmdline_num; i++) {
4703 int pid;
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004704
Dmitry Shmidtb96956e2015-10-28 10:45:04 -07004705 pid = savedcmd->map_cmdline_to_pid[i];
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004706 if (pid == -1 || pid == NO_CMDLINE_MAP)
4707 continue;
4708
Adrian Salidoa06ea262017-04-18 11:44:33 -07004709 pids[n] = pid;
4710 pids[n+1] = __find_tgid_locked(pid);
4711 n += 2;
4712 }
4713 arch_spin_unlock(&trace_cmdline_lock);
4714 preempt_enable();
4715
4716 if (n == 0) {
4717 kfree(pids);
4718 return 0;
4719 }
4720
4721	/* enough to hold the widest pid pair plus space, LF and NUL */
4722 len = n * 12;
4723 file_buf = kmalloc(len, GFP_KERNEL);
4724 if (!file_buf) {
4725 kfree(pids);
4726 return -ENOMEM;
4727 }
4728
4729 buf = file_buf;
4730 for (i = 0; i < n && len > 0; i += 2) {
4731 int r;
4732
4733 r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004734 buf += r;
Adrian Salidoa06ea262017-04-18 11:44:33 -07004735 len -= r;
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004736 }
4737
4738 len = simple_read_from_buffer(ubuf, cnt, ppos,
Adrian Salidoa06ea262017-04-18 11:44:33 -07004739 file_buf, buf - file_buf);
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004740
4741 kfree(file_buf);
Adrian Salidoa06ea262017-04-18 11:44:33 -07004742 kfree(pids);
Jamie Gennis6eaff2c2012-11-21 15:04:25 -08004743
4744 return len;
4745}
4746
4747static const struct file_operations tracing_saved_tgids_fops = {
4748 .open = tracing_open_generic,
4749 .read = tracing_saved_tgids_read,
4750 .llseek = generic_file_llseek,
4751};
4752
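/*
 * Illustrative read (values are examples): each line pairs a recorded
 * pid with its thread group id, in the "%d %d\n" format built above:
 *
 *	# cat /sys/kernel/debug/tracing/saved_tgids
 *	1234 1234
 *	1240 1234
 */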
4753static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004754tracing_set_trace_read(struct file *filp, char __user *ubuf,
4755 size_t cnt, loff_t *ppos)
4756{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004757 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004758 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004759 int r;
4760
4761 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004762 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004763 mutex_unlock(&trace_types_lock);
4764
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004765 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004766}
4767
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004768int tracer_init(struct tracer *t, struct trace_array *tr)
4769{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004770 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004771 return t->init(tr);
4772}
4773
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004774static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004775{
4776 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004777
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004778 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004779 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004780}
4781
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004782#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004783/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004784static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4785 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004786{
4787 int cpu, ret = 0;
4788
4789 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4790 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004791 ret = ring_buffer_resize(trace_buf->buffer,
4792 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004793 if (ret < 0)
4794 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004795 per_cpu_ptr(trace_buf->data, cpu)->entries =
4796 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004797 }
4798 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004799 ret = ring_buffer_resize(trace_buf->buffer,
4800 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004801 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004802 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4803 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004804 }
4805
4806 return ret;
4807}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004808#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004809
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004810static int __tracing_resize_ring_buffer(struct trace_array *tr,
4811 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004812{
4813 int ret;
4814
4815 /*
4816	 * If the kernel or user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04004817 * we use the size that was given, and we can forget about
4818 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004819 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004820 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004821
Steven Rostedtb382ede62012-10-10 21:44:34 -04004822 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004823 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004824 return 0;
4825
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004826 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004827 if (ret < 0)
4828 return ret;
4829
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004830#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004831 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4832 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004833 goto out;
4834
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004835 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004836 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004837 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4838 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004839 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004840 /*
4841 * AARGH! We are left with different
4842 * size max buffer!!!!
4843 * The max buffer is our "snapshot" buffer.
4844 * When a tracer needs a snapshot (one of the
4845 * latency tracers), it swaps the max buffer
4846			 * with the saved snapshot. We succeeded in updating
4847			 * the size of the main buffer, but failed to
4848 * update the size of the max buffer. But when we tried
4849 * to reset the main buffer to the original size, we
4850 * failed there too. This is very unlikely to
4851 * happen, but if it does, warn and kill all
4852 * tracing.
4853 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004854 WARN_ON(1);
4855 tracing_disabled = 1;
4856 }
4857 return ret;
4858 }
4859
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004860 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004861 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004862 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004863 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004864
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004865 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004866#endif /* CONFIG_TRACER_MAX_TRACE */
4867
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004868 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004869 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004870 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004871 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004872
4873 return ret;
4874}
4875
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004876static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4877 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004878{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004879 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004880
4881 mutex_lock(&trace_types_lock);
4882
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004883 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4884 /* make sure, this cpu is enabled in the mask */
4885 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4886 ret = -EINVAL;
4887 goto out;
4888 }
4889 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004890
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004891 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004892 if (ret < 0)
4893 ret = -ENOMEM;
4894
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004895out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004896 mutex_unlock(&trace_types_lock);
4897
4898 return ret;
4899}
4900
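/*
 * Usage sketch (illustrative): these resize paths back the
 * buffer_size_kb files. Writing the top-level file resizes every CPU's
 * ring buffer; the per_cpu variant resizes just one:
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	# echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu2/buffer_size_kb
 *
 * The size passed down here is in bytes; the file's write handler (not
 * shown in this hunk) converts from KB before calling in.
 */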
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004901
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004902/**
4903 * tracing_update_buffers - used by the tracing facility to expand ring buffers
4904 *
4905 * To save memory on systems where tracing is configured in but never
4906 * used, the ring buffers are set to a minimum size. Once a user
4907 * starts to use the tracing facility, the buffers need to grow to
4908 * their default size.
4909 *
4910 * This function is to be called when a tracer is about to be used.
4911 */
4912int tracing_update_buffers(void)
4913{
4914 int ret = 0;
4915
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004916 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004917 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004918 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004919 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004920 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004921
4922 return ret;
4923}
4924
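/*
 * A minimal sketch (hypothetical caller, not part of this file) of the
 * intended call pattern: a facility expands the ring buffers before it
 * enables tracing, so the boot-time minimum is only grown on demand.
 */
#if 0
static int example_prepare_tracing(void)
{
	int ret;

	/* Grow the per-cpu buffers from their boot-time minimum. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* ... safe to enable events or a tracer from here on ... */
	return 0;
}
#endif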
Steven Rostedt577b7852009-02-26 23:43:05 -05004925struct trace_option_dentry;
4926
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004927static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004928create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004929
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004930/*
4931 * Used to clear out the tracer before deletion of an instance.
4932 * Must have trace_types_lock held.
4933 */
4934static void tracing_set_nop(struct trace_array *tr)
4935{
4936 if (tr->current_trace == &nop_trace)
4937 return;
4938
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004939 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004940
4941 if (tr->current_trace->reset)
4942 tr->current_trace->reset(tr);
4943
4944 tr->current_trace = &nop_trace;
4945}
4946
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004947static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004948{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004949 /* Only enable if the directory has been created already. */
4950 if (!tr->dir)
4951 return;
4952
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004953 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004954}
4955
4956static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4957{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004958 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004959#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004960 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004961#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004962 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004963
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004964 mutex_lock(&trace_types_lock);
4965
Steven Rostedt73c51622009-03-11 13:42:01 -04004966 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004967 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004968 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004969 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004970 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004971 ret = 0;
4972 }
4973
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004974 for (t = trace_types; t; t = t->next) {
4975 if (strcmp(t->name, buf) == 0)
4976 break;
4977 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004978 if (!t) {
4979 ret = -EINVAL;
4980 goto out;
4981 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004982 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004983 goto out;
4984
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004985 /* Some tracers are only allowed for the top level buffer */
4986 if (!trace_ok_for_array(t, tr)) {
4987 ret = -EINVAL;
4988 goto out;
4989 }
4990
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004991 /* If trace pipe files are being read, we can't change the tracer */
4992 if (tr->current_trace->ref) {
4993 ret = -EBUSY;
4994 goto out;
4995 }
4996
Steven Rostedt9f029e82008-11-12 15:24:24 -05004997 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004998
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004999 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005000
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005001 if (tr->current_trace->reset)
5002 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005003
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005004 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005005 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005006
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005007#ifdef CONFIG_TRACER_MAX_TRACE
5008 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005009
5010 if (had_max_tr && !t->use_max_tr) {
5011 /*
5012 * We need to make sure that the update_max_tr sees that
5013 * current_trace changed to nop_trace to keep it from
5014 * swapping the buffers after we resize it.
 5015		 * update_max_tr() is called with interrupts disabled,
 5016		 * so a synchronize_sched() is sufficient.
5017 */
5018 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005019 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005020 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005021#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005022
5023#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005024 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005025 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005026 if (ret < 0)
5027 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005028 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005029#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005030
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005031 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005032 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005033 if (ret)
5034 goto out;
5035 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005036
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005037 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005038 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005039 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005040 out:
5041 mutex_unlock(&trace_types_lock);
5042
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005043 return ret;
5044}
5045
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005046static ssize_t
5047tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5048 size_t cnt, loff_t *ppos)
5049{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005050 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005051 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005052 int i;
5053 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005054 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005055
Steven Rostedt60063a62008-10-28 10:44:24 -04005056 ret = cnt;
5057
Li Zefanee6c2c12009-09-18 14:06:47 +08005058 if (cnt > MAX_TRACER_SIZE)
5059 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005060
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005061 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005062 return -EFAULT;
5063
5064 buf[cnt] = 0;
5065
5066 /* strip ending whitespace. */
5067 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5068 buf[i] = 0;
5069
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005070 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005071 if (err)
5072 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005073
Jiri Olsacf8517c2009-10-23 19:36:16 -04005074 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005075
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005076 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005077}
5078
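/*
 * A hedged userspace sketch of driving the write handler above. The
 * tracefs mount point is an assumption (/sys/kernel/tracing here;
 * /sys/kernel/debug/tracing on older setups). "nop" is always a valid
 * tracer name, and trailing whitespace is stripped by the handler.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "nop\n", 4) != 4)
		perror("write");	/* -EINVAL for an unknown tracer */
	close(fd);
	return 0;
}
#endif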
5079static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005080tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5081 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005082{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005083 char buf[64];
5084 int r;
5085
Steven Rostedtcffae432008-05-12 21:21:00 +02005086 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005087 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005088 if (r > sizeof(buf))
5089 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005090 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005091}
5092
5093static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005094tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5095 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005096{
Hannes Eder5e398412009-02-10 19:44:34 +01005097 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005098 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005099
Peter Huewe22fe9b52011-06-07 21:58:27 +02005100 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5101 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005102 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005103
5104 *ptr = val * 1000;
5105
5106 return cnt;
5107}
5108
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005109static ssize_t
5110tracing_thresh_read(struct file *filp, char __user *ubuf,
5111 size_t cnt, loff_t *ppos)
5112{
5113 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5114}
5115
5116static ssize_t
5117tracing_thresh_write(struct file *filp, const char __user *ubuf,
5118 size_t cnt, loff_t *ppos)
5119{
5120 struct trace_array *tr = filp->private_data;
5121 int ret;
5122
5123 mutex_lock(&trace_types_lock);
5124 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5125 if (ret < 0)
5126 goto out;
5127
5128 if (tr->current_trace->update_thresh) {
5129 ret = tr->current_trace->update_thresh(tr);
5130 if (ret < 0)
5131 goto out;
5132 }
5133
5134 ret = cnt;
5135out:
5136 mutex_unlock(&trace_types_lock);
5137
5138 return ret;
5139}
5140
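/*
 * A hedged userspace sketch for the handler above: tracing_nsecs_write()
 * multiplies the written value by 1000, so the number passed in is in
 * microseconds even though it is stored in nanoseconds. The tracefs
 * path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *usecs = "100\n";	/* 100 usecs, stored as 100000 ns */
	int fd = open("/sys/kernel/tracing/tracing_thresh", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, usecs, strlen(usecs)) < 0)
		perror("write");
	close(fd);
	return 0;
}
#endif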
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005141#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005142
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005143static ssize_t
5144tracing_max_lat_read(struct file *filp, char __user *ubuf,
5145 size_t cnt, loff_t *ppos)
5146{
5147 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5148}
5149
5150static ssize_t
5151tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5152 size_t cnt, loff_t *ppos)
5153{
5154 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5155}
5156
Chen Gange428abb2015-11-10 05:15:15 +08005157#endif
5158
Steven Rostedtb3806b42008-05-12 21:20:46 +02005159static int tracing_open_pipe(struct inode *inode, struct file *filp)
5160{
Oleg Nesterov15544202013-07-23 17:25:57 +02005161 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005162 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005163 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005164
5165 if (tracing_disabled)
5166 return -ENODEV;
5167
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005168 if (trace_array_get(tr) < 0)
5169 return -ENODEV;
5170
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005171 mutex_lock(&trace_types_lock);
5172
Steven Rostedtb3806b42008-05-12 21:20:46 +02005173 /* create a buffer to store the information to pass to userspace */
5174 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005175 if (!iter) {
5176 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005177 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005178 goto out;
5179 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005180
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005181 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005182 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005183
5184 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5185 ret = -ENOMEM;
5186 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305187 }
5188
Steven Rostedta3097202008-11-07 22:36:02 -05005189	/* trace pipe does not show the start of the buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305190 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005191
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005192 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005193 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5194
David Sharp8be07092012-11-13 12:18:22 -08005195 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005196 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005197 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5198
Oleg Nesterov15544202013-07-23 17:25:57 +02005199 iter->tr = tr;
5200 iter->trace_buffer = &tr->trace_buffer;
5201 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005202 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005203 filp->private_data = iter;
5204
Steven Rostedt107bad82008-05-12 21:21:01 +02005205 if (iter->trace->pipe_open)
5206 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005207
Arnd Bergmannb4447862010-07-07 23:40:11 +02005208 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005209
5210 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005211out:
5212 mutex_unlock(&trace_types_lock);
5213 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005214
5215fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005216 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005217 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005218 mutex_unlock(&trace_types_lock);
5219 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005220}
5221
5222static int tracing_release_pipe(struct inode *inode, struct file *file)
5223{
5224 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005225 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005226
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005227 mutex_lock(&trace_types_lock);
5228
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005229 tr->current_trace->ref--;
5230
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005231 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005232 iter->trace->pipe_close(iter);
5233
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005234 mutex_unlock(&trace_types_lock);
5235
Rusty Russell44623442009-01-01 10:12:23 +10305236 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005237 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005238 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005239
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005240 trace_array_put(tr);
5241
Steven Rostedtb3806b42008-05-12 21:20:46 +02005242 return 0;
5243}
5244
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005245static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005246trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005247{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005248 struct trace_array *tr = iter->tr;
5249
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005250	/* Iterators are static; they are either filled or empty */
5251 if (trace_buffer_iter(iter, iter->cpu_file))
5252 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005253
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005254 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005255 /*
5256 * Always select as readable when in blocking mode
5257 */
5258 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005259 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005260 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005261 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005262}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005263
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005264static unsigned int
5265tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5266{
5267 struct trace_iterator *iter = filp->private_data;
5268
5269 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005270}
5271
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005272/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005273static int tracing_wait_pipe(struct file *filp)
5274{
5275 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005276 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005277
5278 while (trace_empty(iter)) {
5279
5280 if ((filp->f_flags & O_NONBLOCK)) {
5281 return -EAGAIN;
5282 }
5283
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005284 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005285		 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005286		 * We still block if tracing is disabled, but only if we have never
5287		 * read anything. This allows a user to cat this file, and
5288		 * then enable tracing. But after we have read something,
5289		 * we give an EOF when tracing is disabled again.
5290 *
5291 * iter->pos will be 0 if we haven't read anything.
5292 */
Tahsin Erdogan97d402e2017-09-17 03:23:48 -07005293 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005294 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005295
5296 mutex_unlock(&iter->mutex);
5297
Rabin Vincente30f53a2014-11-10 19:46:34 +01005298 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005299
5300 mutex_lock(&iter->mutex);
5301
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005302 if (ret)
5303 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005304 }
5305
5306 return 1;
5307}
5308
Steven Rostedtb3806b42008-05-12 21:20:46 +02005309/*
5310 * Consumer reader.
5311 */
5312static ssize_t
5313tracing_read_pipe(struct file *filp, char __user *ubuf,
5314 size_t cnt, loff_t *ppos)
5315{
5316 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005317 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005318
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005319 /*
 5320	 * Avoid more than one consumer on a single file descriptor.
 5321	 * This is just a matter of trace coherency; the ring buffer itself
 5322	 * is protected.
5323 */
5324 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005325
5326 /* return any leftover data */
5327 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5328 if (sret != -EBUSY)
5329 goto out;
5330
5331 trace_seq_init(&iter->seq);
5332
Steven Rostedt107bad82008-05-12 21:21:01 +02005333 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005334 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5335 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005336 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005337 }
5338
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005339waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005340 sret = tracing_wait_pipe(filp);
5341 if (sret <= 0)
5342 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005343
5344 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005345 if (trace_empty(iter)) {
5346 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005347 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005348 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005349
5350 if (cnt >= PAGE_SIZE)
5351 cnt = PAGE_SIZE - 1;
5352
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005353 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005354 memset(&iter->seq, 0,
5355 sizeof(struct trace_iterator) -
5356 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005357 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005358 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005359
Lai Jiangshan4f535962009-05-18 19:35:34 +08005360 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005361 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005362 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005363 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005364 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005365
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005366 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005367 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005368 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005369 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005370 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005371 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005372 if (ret != TRACE_TYPE_NO_CONSUME)
5373 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005374
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005375 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005376 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005377
5378 /*
 5379		 * Setting the full flag means we reached the trace_seq buffer
 5380		 * size and should have left via the partial-output condition above.
 5381		 * One of the trace_seq_* functions is not being used properly.
5382 */
5383 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5384 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005385 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005386 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005387 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005388
Steven Rostedtb3806b42008-05-12 21:20:46 +02005389 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005390 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005391 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005392 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005393
5394 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005395	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005396 * entries, go back to wait for more entries.
5397 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005398 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005399 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005400
Steven Rostedt107bad82008-05-12 21:21:01 +02005401out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005402 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005403
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005404 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005405}
5406
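/*
 * A hedged userspace sketch of the consumer-reader contract implemented
 * above: reads from trace_pipe block until entries arrive (unless the
 * file is opened O_NONBLOCK), and the entries returned are consumed, so
 * only one reader should use a given descriptor. The tracefs path is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Blocks in tracing_wait_pipe() until entries are available. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif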
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005407static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5408 unsigned int idx)
5409{
5410 __free_page(spd->pages[idx]);
5411}
5412
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005413static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005414 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005415 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005416 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005417 .steal = generic_pipe_buf_steal,
5418 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005419};
5420
Steven Rostedt34cd4992009-02-09 12:06:29 -05005421static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005422tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005423{
5424 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005425 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005426 int ret;
5427
5428 /* Seq buffer is page-sized, exactly what we need. */
5429 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005430 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005431 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005432
5433 if (trace_seq_has_overflowed(&iter->seq)) {
5434 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005435 break;
5436 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005437
5438 /*
5439 * This should not be hit, because it should only
5440 * be set if the iter->seq overflowed. But check it
5441 * anyway to be safe.
5442 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005443 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005444 iter->seq.seq.len = save_len;
5445 break;
5446 }
5447
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005448 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005449 if (rem < count) {
5450 rem = 0;
5451 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005452 break;
5453 }
5454
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005455 if (ret != TRACE_TYPE_NO_CONSUME)
5456 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005457 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005458 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005459 rem = 0;
5460 iter->ent = NULL;
5461 break;
5462 }
5463 }
5464
5465 return rem;
5466}
5467
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005468static ssize_t tracing_splice_read_pipe(struct file *filp,
5469 loff_t *ppos,
5470 struct pipe_inode_info *pipe,
5471 size_t len,
5472 unsigned int flags)
5473{
Jens Axboe35f3d142010-05-20 10:43:18 +02005474 struct page *pages_def[PIPE_DEF_BUFFERS];
5475 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005476 struct trace_iterator *iter = filp->private_data;
5477 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005478 .pages = pages_def,
5479 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005480 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005481 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005482 .flags = flags,
5483 .ops = &tracing_pipe_buf_ops,
5484 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005485 };
5486 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005487 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005488 unsigned int i;
5489
Jens Axboe35f3d142010-05-20 10:43:18 +02005490 if (splice_grow_spd(pipe, &spd))
5491 return -ENOMEM;
5492
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005493 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005494
5495 if (iter->trace->splice_read) {
5496 ret = iter->trace->splice_read(iter, filp,
5497 ppos, pipe, len, flags);
5498 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005499 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005500 }
5501
5502 ret = tracing_wait_pipe(filp);
5503 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005504 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005505
Jason Wessel955b61e2010-08-05 09:22:23 -05005506 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005507 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005508 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005509 }
5510
Lai Jiangshan4f535962009-05-18 19:35:34 +08005511 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005512 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005513
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005514 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005515 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005516 spd.pages[i] = alloc_page(GFP_KERNEL);
5517 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005518 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005519
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005520 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005521
5522 /* Copy the data into the page, so we can start over. */
5523 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005524 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005525 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005526 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005527 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005528 break;
5529 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005530 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005531 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005532
Steven Rostedtf9520752009-03-02 14:04:40 -05005533 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005534 }
5535
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005536 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005537 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005538 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005539
5540 spd.nr_pages = i;
5541
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005542 if (i)
5543 ret = splice_to_pipe(pipe, &spd);
5544 else
5545 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005546out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005547 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005548 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005549
Steven Rostedt34cd4992009-02-09 12:06:29 -05005550out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005551 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005552 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005553}
5554
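/*
 * A hedged userspace sketch of the splice path above: trace data can be
 * moved from trace_pipe into a pipe without an intermediate userspace
 * copy. The tracefs path is an assumption.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);

	if (fd < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	/* One page-sized chunk; serviced by tracing_splice_read_pipe(). */
	n = splice(fd, NULL, pfd[1], NULL, 4096, 0);
	if (n < 0)
		perror("splice");
	close(fd);
	return 0;
}
#endif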
Steven Rostedta98a3c32008-05-12 21:20:59 +02005555static ssize_t
5556tracing_entries_read(struct file *filp, char __user *ubuf,
5557 size_t cnt, loff_t *ppos)
5558{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005559 struct inode *inode = file_inode(filp);
5560 struct trace_array *tr = inode->i_private;
5561 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005562 char buf[64];
5563 int r = 0;
5564 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005565
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005566 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005567
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005568 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005569 int cpu, buf_size_same;
5570 unsigned long size;
5571
5572 size = 0;
5573 buf_size_same = 1;
 5574		/* check if all cpu sizes are the same */
5575 for_each_tracing_cpu(cpu) {
 5576			/* fill in the size from the first enabled cpu */
5577 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005578 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5579 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005580 buf_size_same = 0;
5581 break;
5582 }
5583 }
5584
5585 if (buf_size_same) {
5586 if (!ring_buffer_expanded)
5587 r = sprintf(buf, "%lu (expanded: %lu)\n",
5588 size >> 10,
5589 trace_buf_size >> 10);
5590 else
5591 r = sprintf(buf, "%lu\n", size >> 10);
5592 } else
5593 r = sprintf(buf, "X\n");
5594 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005595 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005596
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005597 mutex_unlock(&trace_types_lock);
5598
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005599 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5600 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005601}
5602
5603static ssize_t
5604tracing_entries_write(struct file *filp, const char __user *ubuf,
5605 size_t cnt, loff_t *ppos)
5606{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005607 struct inode *inode = file_inode(filp);
5608 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005609 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005610 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005611
Peter Huewe22fe9b52011-06-07 21:58:27 +02005612 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5613 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005614 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005615
5616 /* must have at least 1 entry */
5617 if (!val)
5618 return -EINVAL;
5619
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005620 /* value is in KB */
5621 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005622 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005623 if (ret < 0)
5624 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005625
Jiri Olsacf8517c2009-10-23 19:36:16 -04005626 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005627
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005628 return cnt;
5629}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005630
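/*
 * A hedged userspace sketch for the write handler above: the value is
 * interpreted in KB (val <<= 10). Writing to the top-level
 * buffer_size_kb resizes all cpus; the per_cpu/cpuN/buffer_size_kb
 * files resize a single cpu. Paths assume a tracefs mount at
 * /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *kb = "1408\n";	/* resize every cpu buffer to 1408 KB */
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("write");	/* -ENOMEM if the resize failed */
	close(fd);
	return 0;
}
#endif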
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005631static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005632tracing_total_entries_read(struct file *filp, char __user *ubuf,
5633 size_t cnt, loff_t *ppos)
5634{
5635 struct trace_array *tr = filp->private_data;
5636 char buf[64];
5637 int r, cpu;
5638 unsigned long size = 0, expanded_size = 0;
5639
5640 mutex_lock(&trace_types_lock);
5641 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005642 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005643 if (!ring_buffer_expanded)
5644 expanded_size += trace_buf_size >> 10;
5645 }
5646 if (ring_buffer_expanded)
5647 r = sprintf(buf, "%lu\n", size);
5648 else
5649 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5650 mutex_unlock(&trace_types_lock);
5651
5652 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5653}
5654
5655static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005656tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5657 size_t cnt, loff_t *ppos)
5658{
5659 /*
 5660	 * There is no need to read what the user has written; this function
 5661	 * exists just to make sure that there is no error when "echo" is used.
5662 */
5663
5664 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005665
5666 return cnt;
5667}
5668
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005669static int
5670tracing_free_buffer_release(struct inode *inode, struct file *filp)
5671{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005672 struct trace_array *tr = inode->i_private;
5673
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005674 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005675 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005676 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005677 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005678 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005679
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005680 trace_array_put(tr);
5681
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005682 return 0;
5683}
5684
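/*
 * A hedged userspace sketch: as the handlers above show, the write to
 * free_buffer is a no-op; it is the release (close) that shrinks the
 * ring buffer to zero, after turning tracing off if the stop-on-free
 * trace option is set. The tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) < 0)	/* contents are ignored */
		perror("write");
	close(fd);	/* the release is what actually frees the buffer */
	return 0;
}
#endif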
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005685static ssize_t
5686tracing_mark_write(struct file *filp, const char __user *ubuf,
5687 size_t cnt, loff_t *fpos)
5688{
Steven Rostedtd696b582011-09-22 11:50:27 -04005689 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005690 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005691 struct ring_buffer_event *event;
5692 struct ring_buffer *buffer;
5693 struct print_entry *entry;
5694 unsigned long irq_flags;
5695 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005696 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005697 int nr_pages = 1;
5698 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005699 int offset;
5700 int size;
5701 int len;
5702 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005703 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005704
Steven Rostedtc76f0692008-11-07 22:36:02 -05005705 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005706 return -EINVAL;
5707
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005708 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005709 return -EINVAL;
5710
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005711 if (cnt > TRACE_BUF_SIZE)
5712 cnt = TRACE_BUF_SIZE;
5713
Steven Rostedtd696b582011-09-22 11:50:27 -04005714 /*
5715 * Userspace is injecting traces into the kernel trace buffer.
 5716	 * We want to be as non-intrusive as possible.
5717 * To do so, we do not want to allocate any special buffers
5718 * or take any locks, but instead write the userspace data
5719 * straight into the ring buffer.
5720 *
 5721	 * First we need to pin the userspace buffer into memory.
 5722	 * Most likely it already is, because the caller just referenced it,
 5723	 * but there's no guarantee that it is. By using get_user_pages_fast()
5724 * and kmap_atomic/kunmap_atomic() we can get access to the
5725 * pages directly. We then write the data directly into the
5726 * ring buffer.
5727 */
5728 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005729
Steven Rostedtd696b582011-09-22 11:50:27 -04005730	/* check if the write crosses a page boundary */
5731 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5732 nr_pages = 2;
5733
5734 offset = addr & (PAGE_SIZE - 1);
5735 addr &= PAGE_MASK;
5736
5737 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5738 if (ret < nr_pages) {
5739 while (--ret >= 0)
5740 put_page(pages[ret]);
5741 written = -EFAULT;
5742 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005743 }
5744
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005745 for (i = 0; i < nr_pages; i++)
5746 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005747
5748 local_save_flags(irq_flags);
5749 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005750 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005751 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5752 irq_flags, preempt_count());
5753 if (!event) {
 5754		/* Ring buffer disabled; return as if not open for write */
5755 written = -EBADF;
5756 goto out_unlock;
5757 }
5758
5759 entry = ring_buffer_event_data(event);
5760 entry->ip = _THIS_IP_;
5761
5762 if (nr_pages == 2) {
5763 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005764 memcpy(&entry->buf, map_page[0] + offset, len);
5765 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005766 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005767 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005768
5769 if (entry->buf[cnt - 1] != '\n') {
5770 entry->buf[cnt] = '\n';
5771 entry->buf[cnt + 1] = '\0';
5772 } else
5773 entry->buf[cnt] = '\0';
5774
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005775 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005776
5777 written = cnt;
5778
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005779 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005780
Steven Rostedtd696b582011-09-22 11:50:27 -04005781 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005782 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005783 kunmap_atomic(map_page[i]);
5784 put_page(pages[i]);
5785 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005786 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005787 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005788}
5789
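/*
 * A hedged userspace sketch for tracing_mark_write() above: a write to
 * trace_marker lands directly in the ring buffer as a print entry, a
 * '\n' is appended if missing, and writes longer than TRACE_BUF_SIZE
 * are truncated. It requires the "markers" trace option (typically on
 * by default); the tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace";
	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");	/* -EBADF if the ring buffer is off */
	close(fd);
	return 0;
}
#endif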
Li Zefan13f16d22009-12-08 11:16:11 +08005790static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005791{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005792 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005793 int i;
5794
5795 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005796 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005797 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005798 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5799 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005800 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005801
Li Zefan13f16d22009-12-08 11:16:11 +08005802 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005803}
5804
Steven Rostedte1e232c2014-02-10 23:38:46 -05005805static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005806{
Zhaolei5079f322009-08-25 16:12:56 +08005807 int i;
5808
Zhaolei5079f322009-08-25 16:12:56 +08005809 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5810 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5811 break;
5812 }
5813 if (i == ARRAY_SIZE(trace_clocks))
5814 return -EINVAL;
5815
Zhaolei5079f322009-08-25 16:12:56 +08005816 mutex_lock(&trace_types_lock);
5817
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 tr->clock_id = i;
5819
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005820 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005821
David Sharp60303ed2012-10-11 16:27:52 -07005822 /*
 5823	 * The new clock may not be consistent with the previous clock.
5824 * Reset the buffer so that it doesn't have incomparable timestamps.
5825 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005826 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005827
5828#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liucf0523362017-09-05 16:57:19 -05005829 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005830 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005831 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005832#endif
David Sharp60303ed2012-10-11 16:27:52 -07005833
Zhaolei5079f322009-08-25 16:12:56 +08005834 mutex_unlock(&trace_types_lock);
5835
Steven Rostedte1e232c2014-02-10 23:38:46 -05005836 return 0;
5837}
5838
5839static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5840 size_t cnt, loff_t *fpos)
5841{
5842 struct seq_file *m = filp->private_data;
5843 struct trace_array *tr = m->private;
5844 char buf[64];
5845 const char *clockstr;
5846 int ret;
5847
5848 if (cnt >= sizeof(buf))
5849 return -EINVAL;
5850
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005851 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05005852 return -EFAULT;
5853
5854 buf[cnt] = 0;
5855
5856 clockstr = strstrip(buf);
5857
5858 ret = tracing_set_clock(tr, clockstr);
5859 if (ret)
5860 return ret;
5861
Zhaolei5079f322009-08-25 16:12:56 +08005862 *fpos += cnt;
5863
5864 return cnt;
5865}
5866
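/*
 * A hedged userspace sketch for tracing_clock_write() above. Reading
 * trace_clock lists the available names with the current one in
 * brackets; writing one of those names switches the clock and resets
 * the buffers, since old and new timestamps are not comparable.
 * "global" is one of the standard clocks; the tracefs path is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *clock = "global\n";
	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, clock, strlen(clock)) < 0)
		perror("write");	/* -EINVAL for an unknown clock name */
	close(fd);
	return 0;
}
#endif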
Li Zefan13f16d22009-12-08 11:16:11 +08005867static int tracing_clock_open(struct inode *inode, struct file *file)
5868{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005869 struct trace_array *tr = inode->i_private;
5870 int ret;
5871
Li Zefan13f16d22009-12-08 11:16:11 +08005872 if (tracing_disabled)
5873 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005874
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005875 if (trace_array_get(tr))
5876 return -ENODEV;
5877
5878 ret = single_open(file, tracing_clock_show, inode->i_private);
5879 if (ret < 0)
5880 trace_array_put(tr);
5881
5882 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005883}
5884
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005885struct ftrace_buffer_info {
5886 struct trace_iterator iter;
5887 void *spare;
5888 unsigned int read;
5889};
5890
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005891#ifdef CONFIG_TRACER_SNAPSHOT
5892static int tracing_snapshot_open(struct inode *inode, struct file *file)
5893{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005894 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005895 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005896 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005897 int ret = 0;
5898
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005899 if (trace_array_get(tr) < 0)
5900 return -ENODEV;
5901
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005902 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005903 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005904 if (IS_ERR(iter))
5905 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005906 } else {
5907 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005908 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005909 m = kzalloc(sizeof(*m), GFP_KERNEL);
5910 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005911 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005912 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5913 if (!iter) {
5914 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005915 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005916 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005917 ret = 0;
5918
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005919 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005920 iter->trace_buffer = &tr->max_buffer;
5921 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005922 m->private = iter;
5923 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005924 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005925out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005926 if (ret < 0)
5927 trace_array_put(tr);
5928
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005929 return ret;
5930}
5931
5932static ssize_t
5933tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5934 loff_t *ppos)
5935{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005936 struct seq_file *m = filp->private_data;
5937 struct trace_iterator *iter = m->private;
5938 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005939 unsigned long val;
5940 int ret;
5941
5942 ret = tracing_update_buffers();
5943 if (ret < 0)
5944 return ret;
5945
5946 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5947 if (ret)
5948 return ret;
5949
5950 mutex_lock(&trace_types_lock);
5951
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005952 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005953 ret = -EBUSY;
5954 goto out;
5955 }
5956
5957 switch (val) {
5958 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005959 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5960 ret = -EINVAL;
5961 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005962 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005963 if (tr->allocated_snapshot)
5964 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005965 break;
5966 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005967/* Only allow per-cpu swap if the ring buffer supports it */
5968#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5969 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5970 ret = -EINVAL;
5971 break;
5972 }
5973#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005974 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005975 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005976 if (ret < 0)
5977 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005978 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005979 local_irq_disable();
5980 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005981 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005982 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005983 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005984 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005985 local_irq_enable();
5986 break;
5987 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005988 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005989 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5990 tracing_reset_online_cpus(&tr->max_buffer);
5991 else
5992 tracing_reset(&tr->max_buffer, iter->cpu_file);
5993 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005994 break;
5995 }
5996
5997 if (ret >= 0) {
5998 *ppos += cnt;
5999 ret = cnt;
6000 }
6001out:
6002 mutex_unlock(&trace_types_lock);
6003 return ret;
6004}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006005
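/*
 * A hedged userspace sketch of the value semantics implemented by
 * tracing_snapshot_write() above: writing 0 frees the snapshot buffer,
 * 1 allocates it (if needed) and swaps it with the live buffer, and any
 * other value clears the snapshot contents without swapping. Requires a
 * snapshot-enabled kernel; the tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) < 0)	/* take a snapshot now */
		perror("write");	/* -EBUSY if the tracer owns max_tr */
	close(fd);
	return 0;
}
#endif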
6006static int tracing_snapshot_release(struct inode *inode, struct file *file)
6007{
6008 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006009 int ret;
6010
6011 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006012
6013 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006014 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006015
6016 /* If write only, the seq_file is just a stub */
6017 if (m)
6018 kfree(m->private);
6019 kfree(m);
6020
6021 return 0;
6022}
6023
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006024static int tracing_buffers_open(struct inode *inode, struct file *filp);
6025static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6026 size_t count, loff_t *ppos);
6027static int tracing_buffers_release(struct inode *inode, struct file *file);
6028static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6029 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6030
6031static int snapshot_raw_open(struct inode *inode, struct file *filp)
6032{
6033 struct ftrace_buffer_info *info;
6034 int ret;
6035
6036 ret = tracing_buffers_open(inode, filp);
6037 if (ret < 0)
6038 return ret;
6039
6040 info = filp->private_data;
6041
6042 if (info->iter.trace->use_max_tr) {
6043 tracing_buffers_release(inode, filp);
6044 return -EBUSY;
6045 }
6046
6047 info->iter.snapshot = true;
6048 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6049
6050 return ret;
6051}
6052
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006053#endif /* CONFIG_TRACER_SNAPSHOT */
6054
6055
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006056static const struct file_operations tracing_thresh_fops = {
6057 .open = tracing_open_generic,
6058 .read = tracing_thresh_read,
6059 .write = tracing_thresh_write,
6060 .llseek = generic_file_llseek,
6061};
6062
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006063#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006064static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006065 .open = tracing_open_generic,
6066 .read = tracing_max_lat_read,
6067 .write = tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

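/*
 * The tracing_buffers file operations back the per_cpu/cpuN/trace_pipe_raw
 * files: readers get raw ring-buffer pages rather than formatted trace
 * text.  Opening pins both the trace array (trace_array_get()) and the
 * current tracer (->ref++) so neither can be torn down while a reader
 * holds the file.
 *
 * Illustrative user-space consumer (path assumes the usual tracefs
 * mount point):
 *
 *   dd if=/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw \
 *      of=cpu0.raw bs=4096
 */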
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

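/*
 * read() path for trace_pipe_raw: data is handed out in ring-buffer page
 * units.  A "spare" page is allocated once per open file;
 * ring_buffer_read_page() swaps a full page out of the ring buffer into
 * it (or copies into it for partial reads).  info->read tracks how much
 * of the spare page earlier reads have already consumed, so short reads
 * resume mid-page instead of fetching a new one.
 */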
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

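/*
 * A buffer_ref wraps one ring-buffer page that has been spliced into a
 * pipe.  The refcount lets several pipe buffers share the page; the page
 * is handed back to the ring buffer only when the last reference is
 * dropped (see buffer_pipe_buf_release() and buffer_spd_release()).
 */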
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), used to release any pages left in the
 * spd when filling the pipe errored out partway through.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

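/*
 * splice() path for trace_pipe_raw: instead of copying, each ring-buffer
 * page is wrapped in a buffer_ref and linked into the pipe directly,
 * giving a zero-copy transfer.  Everything is therefore page granular:
 * a misaligned *ppos is rejected with -EINVAL and the length is rounded
 * down to whole pages (or rejected if less than one page).
 */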
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

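/*
 * Implements per_cpu/cpuN/stats.  A read produces one counter per line,
 * e.g. (values purely illustrative):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 53248
 *   oldest event ts:  5264.291984
 *   now ts:  5270.998375
 *   dropped events: 0
 *   read events: 128
 */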
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

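/*
 * Backs the dyn_ftrace_total_info file: prints the counter handed in as
 * the file's private data (the number of patched ftrace call sites),
 * followed by any arch-specific detail appended by an override of the
 * weak ftrace_arch_read_dyn_info() above.
 */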
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

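/*
 * The "snapshot" function command: attached to a function via
 * set_ftrace_filter, it triggers tracing_snapshot() every time (or,
 * with a count, the first N times) that function is hit.  Roughly
 * (the function name is only an example):
 *
 *   echo 'schedule:snapshot'   > set_ftrace_filter   # every call
 *   echo 'schedule:snapshot:5' > set_ftrace_filter   # first 5 calls
 *   echo '!schedule:snapshot'  > set_ftrace_filter   # remove the probe
 *
 * The '!' form is handled by the glob[0] == '!' branch in the callback
 * below.
 */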
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

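/*
 * Like trace_create_file(), but stashes cpu + 1 in the new inode's
 * i_cdev so tracing_get_cpu() can recover which CPU a per-cpu file
 * belongs to (the +1 keeps cpu 0 distinct from a NULL i_cdev).
 */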
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

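/*
 * Populate per_cpu/cpuN/ for one CPU: trace_pipe, trace, trace_pipe_raw,
 * stats and buffer_size_kb, plus snapshot and snapshot_raw when
 * CONFIG_TRACER_SNAPSHOT is enabled.
 */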
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek	= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

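/*
 * rb_simple_read()/rb_simple_write() back the "tracing_on" file.
 * Writing 0 or 1 stops or restarts recording into the ring buffer
 * without freeing it, and also calls the current tracer's stop/start
 * hooks; writing the already-current value is a no-op.  E.g.:
 *
 *   echo 0 > tracing_on   # pause recording
 *   echo 1 > tracing_on   # resume
 */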
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

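/*
 * Allocate one trace_buffer: the ring buffer itself plus the per-cpu
 * trace_array_cpu bookkeeping.  allocate_trace_buffers() below calls
 * this for the main buffer and, with CONFIG_TRACER_MAX_TRACE, for the
 * snapshot buffer (sized at a single page unless a snapshot was
 * requested on the kernel command line).
 */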
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

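/*
 * Instances: a mkdir in the instances directory creates a complete,
 * independent trace_array with its own buffers, events and options;
 * rmdir tears it down again (refused with -EBUSY while in use).
 * Assuming the usual tracefs mount point:
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 */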
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

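/*
 * Create the control files every trace array gets: current_tracer,
 * trace, trace_pipe, buffer_size_kb, trace_marker, trace_clock,
 * tracing_on and friends, the options/ directory, and the per-CPU
 * subdirectories.  Used for both the top-level directory and each
 * instance.
 */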
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

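Every control file created above appears once per trace instance. A userspace sketch that drives two of them, tracing_on (rb_simple_fops) and trace (tracing_fops); the path assumes the usual tracefs mount point and error handling is kept minimal:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		/* Writes to tracing_on land in rb_simple_fops above. */
		fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);
		if (fd >= 0) {
			if (write(fd, "1", 1) != 1)
				perror("write tracing_on");
			close(fd);
		}

		/* Reads of "trace" go through tracing_fops. */
		fd = open("/sys/kernel/tracing/trace", O_RDONLY);
		if (fd >= 0) {
			while ((n = read(fd, buf, sizeof(buf))) > 0)
				fwrite(buf, 1, n, stdout);
			close(fd);
		}
		return 0;
	}
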
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

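The net effect of the automount is that the same files stay reachable from both the native tracefs mount and the legacy debugfs path. A small sketch probing the two conventional locations (both paths are the customary mount points, not guaranteed on every system):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* tracefs' native mount point. */
		if (access("/sys/kernel/tracing/trace", F_OK) == 0)
			puts("tracefs mounted at /sys/kernel/tracing");

		/* Legacy path, served by the debugfs automount created above. */
		if (access("/sys/kernel/debug/tracing/trace", F_OK) == 0)
			puts("tracing also reachable under debugfs/tracing");

		return 0;
	}
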
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

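The section bounded by __start/__stop_ftrace_enum_maps is populated by TRACE_DEFINE_ENUM() invocations in trace event headers, one per name/value pair, so the enum can be resolved when event formats are printed. A sketch of the producer side, with a made-up enum and names; the maps only materialize when the header is pulled in by a compilation unit that defines CREATE_TRACE_POINTS:

	/* In a trace event header (the enum here is illustrative): */
	enum mydev_state {
		MYDEV_IDLE,
		MYDEV_BUSY,
	};

	TRACE_DEFINE_ENUM(MYDEV_IDLE);
	TRACE_DEFINE_ENUM(MYDEV_BUSY);

	/*
	 * Each invocation emits a struct trace_enum_map into the
	 * _ftrace_enum_maps section (or mod->trace_enums for modules),
	 * which trace_enum_init() and trace_module_add_enums() above
	 * hand to trace_insert_enum_map().
	 */
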
#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

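Both notifiers are gated on ftrace_dump_on_oops, which can be set at boot with the ftrace_dump_on_oops[=orig_cpu] kernel command-line option or at runtime through the sysctl. A runtime sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* 1 selects DUMP_ALL; "orig_cpu" at boot maps to DUMP_ORIG (2). */
		int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
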
/*
 * printk is limited to a max of 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
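
As the comment in ftrace_dump() notes, the dump can also be requested by hand through sysrq-z. From userspace that goes through /proc/sysrq-trigger (assuming sysrq is enabled via the kernel.sysrq sysctl):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sysrq-trigger", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* 'z' asks the kernel to dump the ftrace ring buffer. */
		if (write(fd, "z", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}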

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
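
Because the buffer is allocated at its minimum size until ring_buffer_expanded is set, the deferred expansion is visible from userspace: buffer_size_kb reports the pending size until a tracer or an explicit write forces the real allocation. A read-only sketch (the "(expanded: N)" suffix is what the file shows while the buffer is still small; path assumes the usual tracefs mount):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/kernel/tracing/buffer_size_kb", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			/* Prints e.g. "7 (expanded: 1408)" before first use. */
			printf("%s", buf);
		}
		close(fd);
		return 0;
	}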

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer's name lives in an init section.
	 * This function is called at late_initcall time. If the boot
	 * tracer was never registered, clear the pointer out here, to
	 * prevent a later registration from accessing the memory that
	 * is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

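default_bootup_tracer comes from the ftrace= kernel command-line option, so clear_boot_tracer() only has work to do when that tracer never registered. A typical boot line combining the options this file consumes (the values are illustrative):

	ftrace=function ftrace_dump_on_oops trace_buf_size=4096k
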
fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);