blob: 6a170a78b4535478b0b816df772f95862ae6edf8 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -040028#include <linux/vmalloc.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060043#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020044
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050046#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020047
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010048/*
Steven Rostedt73c51622009-03-11 13:42:01 -040049 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050052bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040053
54/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010055 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010056 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010058 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010059 * at the same time, giving false positive or negative results.
60 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010061static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010062
Steven Rostedtb2821ae2009-02-02 21:38:32 -050063/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
Li Zefan020e5f82009-07-01 10:47:05 +080066bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050067
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050068/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010072/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
/*
 * Fallback flag handler for tracers that provide no set_flag callback;
 * accepts any flag change and always reports success.
 */
static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
Steven Rostedt0f048702008-11-05 16:05:44 -050082
83/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040084 * To prevent the comm cache from being overwritten when no
85 * tracing is active, only save the comm when a trace event
86 * occurred.
87 */
88static DEFINE_PER_CPU(bool, trace_cmdline_save);
89
90/*
Steven Rostedt0f048702008-11-05 16:05:44 -050091 * Kill all tracing for good (never come back).
92 * It is initialized to 1 but will turn to zero if the initialization
93 * of the tracer is successful. But that is the only place that sets
94 * this back to zero.
95 */
Hannes Eder4fd27352009-02-10 19:44:12 +010096static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050097
Jason Wessel955b61e2010-08-05 09:22:23 -050098cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +020099
Steven Rostedt944ac422008-10-23 19:26:08 -0400100/*
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102 *
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputing it to a
107 * serial console.
108 *
109 * It is default off, but you can enable it with either specifying
110 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400114 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200115
116enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400117
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400121#ifdef CONFIG_TRACE_ENUM_MAP_FILE
122/* Map of enums to their values, for "enum_map" file */
123struct trace_enum_map_head {
124 struct module *mod;
125 unsigned long length;
126};
127
128union trace_enum_map_item;
129
130struct trace_enum_map_tail {
131 /*
132 * "end" is first and points to NULL as it must be different
133 * than "mod" or "enum_string"
134 */
135 union trace_enum_map_item *next;
136 const char *end; /* points to NULL */
137};
138
139static DEFINE_MUTEX(trace_enum_mutex);
140
141/*
142 * The trace_enum_maps are saved in an array with two extra elements,
143 * one at the beginning, and one at the end. The beginning item contains
144 * the count of the saved maps (head.length), and the module they
145 * belong to if not built in (head.mod). The ending item contains a
146 * pointer to the next array of saved enum_map items.
147 */
148union trace_enum_map_item {
149 struct trace_enum_map map;
150 struct trace_enum_map_head head;
151 struct trace_enum_map_tail tail;
152};
153
154static union trace_enum_map_item *trace_enum_maps;
155#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
156
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500157static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500158
Li Zefanee6c2c12009-09-18 14:06:47 +0800159#define MAX_TRACER_SIZE 100
160static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500161static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100162
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500163static bool allocate_snapshot;
164
/*
 * "ftrace=<tracer>" boot parameter: remember the requested tracer name so
 * it can be enabled once tracing is initialized.
 */
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100174
Steven Rostedt944ac422008-10-23 19:26:08 -0400175static int __init set_ftrace_dump_on_oops(char *str)
176{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200177 if (*str++ != '=' || !*str) {
178 ftrace_dump_on_oops = DUMP_ALL;
179 return 1;
180 }
181
182 if (!strcmp("orig_cpu", str)) {
183 ftrace_dump_on_oops = DUMP_ORIG;
184 return 1;
185 }
186
187 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400188}
189__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200190
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400191static int __init stop_trace_on_warning(char *str)
192{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200193 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
194 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400195 return 1;
196}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200197__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400198
/*
 * "alloc_snapshot" boot parameter: pre-allocate the snapshot buffer at
 * boot so a snapshot can be taken before user space is up.
 */
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500207
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400208
209static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400210
/*
 * "trace_options=<opts>" boot parameter: stash the option string; it is
 * applied later during tracer initialization.
 */
static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
217
Steven Rostedte1e232c2014-02-10 23:38:46 -0500218static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
219static char *trace_boot_clock __initdata;
220
/*
 * "trace_clock=<clock>" boot parameter: record the requested trace clock
 * name for use once the tracing infrastructure comes up.
 */
static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
228
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500229static int __init set_tracepoint_printk(char *str)
230{
231 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
232 tracepoint_printk = 1;
233 return 1;
234}
235__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400236
/*
 * Convert nanoseconds to microseconds, rounding to the nearest usec.
 * do_div() is used because 64-bit division is not native on all arches;
 * it divides @nsec in place.
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
243
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400244/* trace_flags holds trace_options default values */
245#define TRACE_DEFAULT_FLAGS \
246 (FUNCTION_DEFAULT_FLAGS | \
247 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
248 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
249 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
250 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
251
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400252/* trace_options that are only supported by global_trace */
253#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
254 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
255
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400256/* trace_flags that are default zero for instances */
257#define ZEROED_TRACE_FLAGS \
258 TRACE_ITER_EVENT_FORK
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400259
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200260/*
261 * The global_trace is the descriptor that holds the tracing
262 * buffers for the live tracing. For each CPU, it contains
263 * a link list of pages that will store trace entries. The
264 * page descriptor of the pages in the memory is used to hold
265 * the link list by linking the lru item in the page descriptor
266 * to each of the pages in the buffer per CPU.
267 *
268 * For each active CPU there is a data field that holds the
269 * pages for the buffer for that CPU. Each CPU has the same number
270 * of pages allocated for its buffer.
271 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400272static struct trace_array global_trace = {
273 .trace_flags = TRACE_DEFAULT_FLAGS,
274};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200275
Steven Rostedtae63b312012-05-03 23:09:03 -0400276LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200277
/*
 * trace_array_get - take a reference on a trace_array if it still exists
 * @this_tr: the trace_array to grab
 *
 * Walks ftrace_trace_arrays under trace_types_lock; the lock guarantees
 * @this_tr cannot be freed between the lookup and the ref increment.
 *
 * Returns 0 on success, -ENODEV if @this_tr is no longer on the list.
 */
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
295
/* Drop a reference; caller must hold trace_types_lock. */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}
301
/* Locked counterpart of __trace_array_put(); pairs with trace_array_get(). */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
308
/*
 * call_filter_check_discard - discard an event that fails its filter
 * @call: the event type, carrying the filter and its enable flag
 * @rec: the recorded event data to run the predicates against
 * @buffer: ring buffer the event was reserved in
 * @event: the reserved (not yet committed) ring buffer event
 *
 * Returns 1 if the event was filtered out and discarded, 0 if it should
 * be committed normally.
 */
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500321
/*
 * Free a pid list: the pid bitmap is vmalloc'd (see trace_pid_write())
 * and the descriptor kmalloc'd, so the free calls must match.
 */
void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}
327
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}
347
348/**
349 * trace_ignore_this_task - should a task be ignored for tracing
350 * @filtered_pids: The list of pids to check
351 * @task: The task that should be ignored if not filtered
352 *
353 * Checks if @task should be traced or not from @filtered_pids.
354 * Returns true if @task should *NOT* be traced.
355 * Returns false if @task should be traced.
356 */
357bool
358trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
359{
360 /*
361 * Return false, because if filtered_pids does not exist,
362 * all pids are good to trace.
363 */
364 if (!filtered_pids)
365 return false;
366
367 return !trace_find_filtered_pid(filtered_pids, task->pid);
368}
369
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
405
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}
433
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}
460
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	/* Iterators hand out pid+1 so zero is representable; undo that. */
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
476
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400477/* 128 should be much more than enough */
478#define PID_BUF_SIZE 127
479
/*
 * trace_pid_write - parse a user-supplied list of pids into a new pid_list
 * @filtered_pids: the currently active list (its pids are carried over), or NULL
 * @new_pid_list: on success, set to the new list (or NULL if emptied)
 * @ubuf: user-space buffer of whitespace-separated pid numbers
 * @cnt: number of bytes available in @ubuf
 *
 * The write is all-or-nothing: a fresh list is always built and the
 * current one is left untouched on any failure. Writing no pids clears
 * the filter (*new_pid_list becomes NULL).
 *
 * Returns the number of bytes consumed, or a negative errno.
 */
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	/* One bit per possible pid; vzalloc as pid_max can be large. */
	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		/* NUL-terminate the token before numeric conversion. */
		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
576
/*
 * Return the (normalized) timestamp of @buf for @cpu, falling back to
 * trace_clock_local() during early boot before the buffer exists.
 */
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
590
/* Current trace timestamp for @cpu, taken from the global trace buffer. */
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
595
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
615
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200616/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400617 * trace_buf_size is the size in bytes that is allocated
618 * for a buffer. Note, the number of bytes is always rounded
619 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400620 *
621 * This number is purposely set to a low number of 16384.
622 * If the dump on oops happens, it will be much appreciated
623 * to not have to wait for all that output. Anyway this can be
624 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200625 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400626#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400627
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400628static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200629
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200630/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200631static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200632
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200633/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200634 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200635 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700636DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200637
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800638/*
639 * serialize the access of the ring buffer
640 *
641 * ring buffer serializes readers, but it is low level protection.
642 * The validity of the events (which returns by ring_buffer_peek() ..etc)
643 * are not protected by ring buffer.
644 *
645 * The content of events may become garbage if we allow other process consumes
646 * these events concurrently:
647 * A) the page of the consumed events may become a normal page
648 * (not reader page) in ring buffer, and this page will be rewrited
649 * by events producer.
650 * B) The page of the consumed events may become a page for splice_read,
651 * and this page will be returned to system.
652 *
653 * These primitives allow multi process access to different cpu ring buffer
654 * concurrently.
655 *
656 * These primitives don't distinguish read-only and read-consume access.
657 * Multi read-only access are also serialized.
658 */
659
660#ifdef CONFIG_SMP
661static DECLARE_RWSEM(all_cpu_access_lock);
662static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
663
/*
 * Serialize reader access to one cpu buffer, or to all of them when
 * @cpu is RING_BUFFER_ALL_CPUS. Pairs with trace_access_unlock().
 */
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}
679
/* Release the locks taken by trace_access_lock(), in reverse order. */
static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}
689
/* Initialize the per-cpu buffer-access mutexes (SMP only). */
static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
697
698#else
699
/* On UP a single mutex suffices; the @cpu argument is irrelevant. */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

/* Nothing to set up in the UP case. */
static inline void trace_access_lock_init(void)
{
}
717
718#endif
719
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400720#ifdef CONFIG_STACKTRACE
721static void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400724static inline void ftrace_trace_stack(struct trace_array *tr,
725 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400726 unsigned long flags,
727 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400728
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400729#else
730static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
731 unsigned long flags,
732 int skip, int pc, struct pt_regs *regs)
733{
734}
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400735static inline void ftrace_trace_stack(struct trace_array *tr,
736 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400737 unsigned long flags,
738 int skip, int pc, struct pt_regs *regs)
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400739{
740}
741
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400742#endif
743
/* Enable recording into @tr's buffer and clear the mirror disabled flag. */
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
760
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
772
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 *
 * Returns the number of bytes recorded (@size), or 0 if tracing is
 * disabled, a selftest is running, or the buffer reservation fails.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Reserve room for the string plus a possible '\n' and the '\0'. */
	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
823
824/**
825 * __trace_bputs - write the pointer to a constant string into trace buffer
826 * @ip: The address of the caller
827 * @str: The constant string to write to the buffer to
828 */
829int __trace_bputs(unsigned long ip, const char *str)
830{
831 struct ring_buffer_event *event;
832 struct ring_buffer *buffer;
833 struct bputs_entry *entry;
834 unsigned long irq_flags;
835 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800836 int pc;
837
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400838 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800839 return 0;
840
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800841 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500842
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500843 if (unlikely(tracing_selftest_running || tracing_disabled))
844 return 0;
845
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500846 local_save_flags(irq_flags);
847 buffer = global_trace.trace_buffer.buffer;
848 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800849 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500850 if (!event)
851 return 0;
852
853 entry = ring_buffer_event_data(event);
854 entry->ip = ip;
855 entry->str = str;
856
857 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400858 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500859
860 return 1;
861}
862EXPORT_SYMBOL_GPL(__trace_bputs);
863
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500864#ifdef CONFIG_TRACER_SNAPSHOT
865/**
866 * trace_snapshot - take a snapshot of the current buffer.
867 *
868 * This causes a swap between the snapshot buffer and the current live
869 * tracing buffer. You can use this to take snapshots of the live
870 * trace when some condition is triggered, but continue to trace.
871 *
872 * Note, make sure to allocate the snapshot with either
873 * a tracing_snapshot_alloc(), or by doing it manually
874 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
875 *
876 * If the snapshot buffer is not allocated, it will stop tracing.
877 * Basically making a permanent snapshot.
878 */
879void tracing_snapshot(void)
880{
881 struct trace_array *tr = &global_trace;
882 struct tracer *tracer = tr->current_trace;
883 unsigned long flags;
884
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500885 if (in_nmi()) {
886 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
887 internal_trace_puts("*** snapshot is being ignored ***\n");
888 return;
889 }
890
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500891 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500892 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
893 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500894 tracing_off();
895 return;
896 }
897
898 /* Note, snapshot can not be used when the tracer uses it */
899 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500900 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
901 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500902 return;
903 }
904
905 local_irq_save(flags);
906 update_max_tr(tr, current, smp_processor_id());
907 local_irq_restore(flags);
908}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500909EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500910
911static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
912 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400913static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
914
915static int alloc_snapshot(struct trace_array *tr)
916{
917 int ret;
918
919 if (!tr->allocated_snapshot) {
920
921 /* allocate spare buffer */
922 ret = resize_buffer_duplicate_size(&tr->max_buffer,
923 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
924 if (ret < 0)
925 return ret;
926
927 tr->allocated_snapshot = true;
928 }
929
930 return 0;
931}
932
Fabian Frederickad1438a2014-04-17 21:44:42 +0200933static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400934{
935 /*
936 * We don't free the ring buffer. instead, resize it because
937 * The max_tr ring buffer has some state (e.g. ring->clock) and
938 * we want preserve it.
939 */
940 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
941 set_buffer_entries(&tr->max_buffer, 1);
942 tracing_reset_online_cpus(&tr->max_buffer);
943 tr->allocated_snapshot = false;
944}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500945
946/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500947 * tracing_alloc_snapshot - allocate snapshot buffer.
948 *
949 * This only allocates the snapshot buffer if it isn't already
950 * allocated - it doesn't also take a snapshot.
951 *
952 * This is meant to be used in cases where the snapshot buffer needs
953 * to be set up for events that can't sleep but need to be able to
954 * trigger a snapshot.
955 */
956int tracing_alloc_snapshot(void)
957{
958 struct trace_array *tr = &global_trace;
959 int ret;
960
961 ret = alloc_snapshot(tr);
962 WARN_ON(ret < 0);
963
964 return ret;
965}
966EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
967
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	/* Allocation failure already warns; just bail quietly here */
	if (tracing_alloc_snapshot() < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500990#else
991void tracing_snapshot(void)
992{
993 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
994}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500995EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500996int tracing_alloc_snapshot(void)
997{
998 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
999 return -ENODEV;
1000}
1001EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001002void tracing_snapshot_alloc(void)
1003{
1004 /* Give warning */
1005 tracing_snapshot();
1006}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001007EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001008#endif /* CONFIG_TRACER_SNAPSHOT */
1009
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -04001010static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001011{
1012 if (tr->trace_buffer.buffer)
1013 ring_buffer_record_off(tr->trace_buffer.buffer);
1014 /*
1015 * This flag is looked at when buffers haven't been allocated
1016 * yet, or by some tracers (like irqsoff), that just want to
1017 * know if the ring buffer has been disabled, but it can handle
1018 * races of where it gets disabled but we still do a record.
1019 * As the check is in the fast path of the tracers, it is more
1020 * important to be fast than accurate.
1021 */
1022 tr->buffer_disabled = 1;
1023 /* Make the flag seen by readers */
1024 smp_wmb();
1025}
1026
Steven Rostedt499e5472012-02-22 15:50:28 -05001027/**
1028 * tracing_off - turn off tracing buffers
1029 *
1030 * This function stops the tracing buffers from recording data.
1031 * It does not disable any overhead the tracers themselves may
1032 * be causing. This function simply causes all recording to
1033 * the ring buffers to fail.
1034 */
1035void tracing_off(void)
1036{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001037 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001038}
1039EXPORT_SYMBOL_GPL(tracing_off);
1040
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001041void disable_trace_on_warning(void)
1042{
1043 if (__disable_trace_on_warning)
1044 tracing_off();
1045}
1046
Steven Rostedt499e5472012-02-22 15:50:28 -05001047/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001048 * tracer_tracing_is_on - show real state of ring buffer enabled
1049 * @tr : the trace array to know if ring buffer is enabled
1050 *
1051 * Shows real state of the ring buffer if it is enabled or not.
1052 */
Steven Rostedt (Red Hat)e7c15cd2016-06-23 12:45:36 -04001053int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001054{
1055 if (tr->trace_buffer.buffer)
1056 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1057 return !tr->buffer_disabled;
1058}
1059
Steven Rostedt499e5472012-02-22 15:50:28 -05001060/**
1061 * tracing_is_on - show state of ring buffers enabled
1062 */
1063int tracing_is_on(void)
1064{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001065 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001066}
1067EXPORT_SYMBOL_GPL(tracing_is_on);
1068
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001069static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001070{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001071 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001072
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001073 if (!str)
1074 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001075 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001076 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001077 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001078 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001079 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001080 return 1;
1081}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001082__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001083
Tim Bird0e950172010-02-25 15:36:43 -08001084static int __init set_tracing_thresh(char *str)
1085{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001086 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001087 int ret;
1088
1089 if (!str)
1090 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001091 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001092 if (ret < 0)
1093 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001094 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001095 return 1;
1096}
1097__setup("tracing_thresh=", set_tracing_thresh);
1098
/* Convert nanoseconds to microseconds, truncating the remainder. */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
1103
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001104/*
1105 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1106 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
1107 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1108 * of strings in the order that the enums were defined.
1109 */
1110#undef C
1111#define C(a, b) b
1112
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001113/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001114static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001115 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001116 NULL
1117};
1118
Zhaolei5079f322009-08-25 16:12:56 +08001119static struct {
1120 u64 (*func)(void);
1121 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001122 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001123} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001124 { trace_clock_local, "local", 1 },
1125 { trace_clock_global, "global", 1 },
1126 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001127 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001128 { trace_clock, "perf", 1 },
1129 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001130 { ktime_get_raw_fast_ns, "mono_raw", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001131 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001132};
1133
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001134/*
1135 * trace_parser_get_init - gets the buffer for trace parser
1136 */
1137int trace_parser_get_init(struct trace_parser *parser, int size)
1138{
1139 memset(parser, 0, sizeof(*parser));
1140
1141 parser->buffer = kmalloc(size, GFP_KERNEL);
1142 if (!parser->buffer)
1143 return 1;
1144
1145 parser->size = size;
1146 return 0;
1147}
1148
1149/*
1150 * trace_parser_put - frees the buffer for trace parser
1151 */
1152void trace_parser_put(struct trace_parser *parser)
1153{
1154 kfree(parser->buffer);
1155}
1156
1157/*
1158 * trace_get_user - reads the user input string separated by space
1159 * (matched by isspace(ch))
1160 *
1161 * For each string found the 'struct trace_parser' is updated,
1162 * and the function returns.
1163 *
1164 * Returns number of bytes read.
1165 *
1166 * See kernel/trace/trace.h for 'struct trace_parser' details.
1167 */
1168int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1169 size_t cnt, loff_t *ppos)
1170{
1171 char ch;
1172 size_t read = 0;
1173 ssize_t ret;
1174
1175 if (!*ppos)
1176 trace_parser_clear(parser);
1177
1178 ret = get_user(ch, ubuf++);
1179 if (ret)
1180 goto out;
1181
1182 read++;
1183 cnt--;
1184
1185 /*
1186 * The parser is not finished with the last write,
1187 * continue reading the user input without skipping spaces.
1188 */
1189 if (!parser->cont) {
1190 /* skip white space */
1191 while (cnt && isspace(ch)) {
1192 ret = get_user(ch, ubuf++);
1193 if (ret)
1194 goto out;
1195 read++;
1196 cnt--;
1197 }
1198
1199 /* only spaces were written */
1200 if (isspace(ch)) {
1201 *ppos += read;
1202 ret = read;
1203 goto out;
1204 }
1205
1206 parser->idx = 0;
1207 }
1208
1209 /* read the non-space input */
1210 while (cnt && !isspace(ch)) {
Li Zefan3c235a32009-09-22 13:51:54 +08001211 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001212 parser->buffer[parser->idx++] = ch;
1213 else {
1214 ret = -EINVAL;
1215 goto out;
1216 }
1217 ret = get_user(ch, ubuf++);
1218 if (ret)
1219 goto out;
1220 read++;
1221 cnt--;
1222 }
1223
1224 /* We either got finished input or we have to wait for another call. */
1225 if (isspace(ch)) {
1226 parser->buffer[parser->idx] = 0;
1227 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001228 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001229 parser->cont = true;
1230 parser->buffer[parser->idx++] = ch;
Steven Rostedt057db842013-10-09 22:23:23 -04001231 } else {
1232 ret = -EINVAL;
1233 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001234 }
1235
1236 *ppos += read;
1237 ret = read;
1238
1239out:
1240 return ret;
1241}
1242
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001243/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001244static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001245{
1246 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001247
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001248 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001249 return -EBUSY;
1250
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001251 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001252 if (cnt > len)
1253 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001254 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001255
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001256 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001257 return cnt;
1258}
1259
Tim Bird0e950172010-02-25 15:36:43 -08001260unsigned long __read_mostly tracing_thresh;
1261
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001262#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001263/*
1264 * Copy the new maximum trace into the separate maximum-trace
1265 * structure. (this way the maximum trace is permanently saved,
1266 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1267 */
1268static void
1269__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1270{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001271 struct trace_buffer *trace_buf = &tr->trace_buffer;
1272 struct trace_buffer *max_buf = &tr->max_buffer;
1273 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1274 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001275
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001276 max_buf->cpu = cpu;
1277 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001278
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001279 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001280 max_data->critical_start = data->critical_start;
1281 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001282
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -03001283 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001284 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001285 /*
1286 * If tsk == current, then use current_uid(), as that does not use
1287 * RCU. The irq tracer can be called out of RCU scope.
1288 */
1289 if (tsk == current)
1290 max_data->uid = current_uid();
1291 else
1292 max_data->uid = task_uid(tsk);
1293
Steven Rostedt8248ac02009-09-02 12:27:41 -04001294 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1295 max_data->policy = tsk->policy;
1296 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001297
1298 /* record this tasks comm */
1299 tracing_record_cmdline(tsk);
1300}
1301
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001302/**
1303 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1304 * @tr: tracer
1305 * @tsk: the task with the latency
1306 * @cpu: The cpu that initiated the trace.
1307 *
1308 * Flip the buffers between the @tr and the max_tr and record information
1309 * about which task was the cause of this latency.
1310 */
Ingo Molnare309b412008-05-12 21:20:51 +02001311void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001312update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1313{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001314 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001315
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001316 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001317 return;
1318
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001319 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001320
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001321 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001322 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001323 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001324 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001325 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001326
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001327 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001328
Masami Hiramatsua26030a2018-07-14 01:28:15 +09001329 /* Inherit the recordable setting from trace_buffer */
1330 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1331 ring_buffer_record_on(tr->max_buffer.buffer);
1332 else
1333 ring_buffer_record_off(tr->max_buffer.buffer);
1334
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001335 buf = tr->trace_buffer.buffer;
1336 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1337 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001338
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001339 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001340 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001341}
1342
1343/**
1344 * update_max_tr_single - only copy one trace over, and reset the rest
1345 * @tr - tracer
1346 * @tsk - task with the latency
1347 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001348 *
1349 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001350 */
Ingo Molnare309b412008-05-12 21:20:51 +02001351void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001352update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001354 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001356 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001357 return;
1358
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001359 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001360 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001361 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001362 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001363 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001364 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001365
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001366 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001367
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001368 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001369
Steven Rostedte8165db2009-09-03 19:13:05 -04001370 if (ret == -EBUSY) {
1371 /*
1372 * We failed to swap the buffer due to a commit taking
1373 * place on this CPU. We fail to record, but we reset
1374 * the max trace buffer (no one writes directly to it)
1375 * and flag that it failed.
1376 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001377 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165db2009-09-03 19:13:05 -04001378 "Failed to swap buffers due to commit in progress\n");
1379 }
1380
Steven Rostedte8165db2009-09-03 19:13:05 -04001381 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001382
1383 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001384 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001385}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001386#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001387
Rabin Vincente30f53a2014-11-10 19:46:34 +01001388static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001389{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001390 /* Iterators are static, they should be filled or empty */
1391 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001392 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001393
Rabin Vincente30f53a2014-11-10 19:46:34 +01001394 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1395 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001396}
1397
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run a selftest on this tracer.
 * Here we reset the trace buffer, and set the current
 * tracer to be this tracer. The tracer can then run some
 * internal tracing to verify that everything is in order.
 * If we fail, we do not register this tracer.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001463
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001464static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1465
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001466static void __init apply_trace_boot_options(void);
1467
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001468/**
1469 * register_tracer - register a tracer with the ftrace system.
1470 * @type - the plugin for the tracer
1471 *
1472 * Register a new plugin tracer.
1473 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001474int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475{
1476 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001477 int ret = 0;
1478
1479 if (!type->name) {
1480 pr_info("Tracer must have a name\n");
1481 return -1;
1482 }
1483
Dan Carpenter24a461d2010-07-10 12:06:44 +02001484 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001485 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1486 return -1;
1487 }
1488
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001489 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001490
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001491 tracing_selftest_running = true;
1492
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493 for (t = trace_types; t; t = t->next) {
1494 if (strcmp(type->name, t->name) == 0) {
1495 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001496 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497 type->name);
1498 ret = -1;
1499 goto out;
1500 }
1501 }
1502
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001503 if (!type->set_flag)
1504 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001505 if (!type->flags) {
1506 /*allocate a dummy tracer_flags*/
1507 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001508 if (!type->flags) {
1509 ret = -ENOMEM;
1510 goto out;
1511 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001512 type->flags->val = 0;
1513 type->flags->opts = dummy_tracer_opt;
1514 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001515 if (!type->flags->opts)
1516 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001517
Chunyu Hud39cdd22016-03-08 21:37:01 +08001518 /* store the tracer for __set_tracer_option */
1519 type->flags->trace = type;
1520
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001521 ret = run_tracer_selftest(type);
1522 if (ret < 0)
1523 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001524
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525 type->next = trace_types;
1526 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001527 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001528
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001529 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001530 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001531 mutex_unlock(&trace_types_lock);
1532
Steven Rostedtdac74942009-02-05 01:13:38 -05001533 if (ret || !default_bootup_tracer)
1534 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001535
Li Zefanee6c2c12009-09-18 14:06:47 +08001536 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001537 goto out_unlock;
1538
1539 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1540 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001541 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001542 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001543
1544 apply_trace_boot_options();
1545
Steven Rostedtdac74942009-02-05 01:13:38 -05001546 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001547 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001548#ifdef CONFIG_FTRACE_STARTUP_TEST
1549 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1550 type->name);
1551#endif
1552
1553 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001554 return ret;
1555}
1556
/*
 * tracing_reset - clear out one CPU's ring buffer for a trace buffer
 * @buf: the trace_buffer whose per-CPU ring buffer is reset
 * @cpu: the CPU whose buffer pages are to be cleared
 *
 * Recording is disabled around the reset so no new events land in a
 * buffer that is being wiped.
 */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	/* Buffer may not have been allocated (e.g. boot-time failure) */
	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1572
/*
 * tracing_reset_online_cpus - clear the ring buffers of all online CPUs
 * @buf: the trace_buffer to reset
 *
 * Like tracing_reset() but iterates every online CPU, and also restamps
 * buf->time_start so subsequent output measures from the reset point.
 */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	/* Restart the time base from "now" for this buffer */
	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1593
/*
 * tracing_reset_all_online_cpus - reset every trace array's buffers
 *
 * Walks ftrace_trace_arrays and resets the main (and, when configured,
 * the max/snapshot) buffer of each instance.
 *
 * Must have trace_types_lock held (protects the ftrace_trace_arrays list).
 */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1606
/* Default number of saved comm entries; resizable via tracefs elsewhere */
#define SAVED_CMDLINES_DEFAULT 128
/* Sentinel meaning "no pid<->cmdline mapping"; all-0xff so memset() works */
#define NO_CMDLINE_MAP UINT_MAX
/* Taken with trylock from the sched path; raw arch lock so it is NMI-ish safe */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Cache of task comms so trace output can print a name for a pid after
 * the task is gone. map_pid_to_cmdline and map_cmdline_to_pid are kept
 * as mutual inverses under trace_cmdline_lock.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;	/* capacity: number of saved comm slots */
	int cmdline_idx;	/* index of most recently used slot (ring) */
	char *saved_cmdlines;	/* cmdline_num slots of TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001621
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001622static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001623{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001624 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1625}
1626
1627static inline void set_cmdline(int idx, const char *cmdline)
1628{
1629 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1630}
1631
1632static int allocate_cmdlines_buffer(unsigned int val,
1633 struct saved_cmdlines_buffer *s)
1634{
1635 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1636 GFP_KERNEL);
1637 if (!s->map_cmdline_to_pid)
1638 return -ENOMEM;
1639
1640 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1641 if (!s->saved_cmdlines) {
1642 kfree(s->map_cmdline_to_pid);
1643 return -ENOMEM;
1644 }
1645
1646 s->cmdline_idx = 0;
1647 s->cmdline_num = val;
1648 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1649 sizeof(s->map_pid_to_cmdline));
1650 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1651 val * sizeof(*s->map_cmdline_to_pid));
1652
1653 return 0;
1654}
1655
1656static int trace_create_savedcmd(void)
1657{
1658 int ret;
1659
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001660 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001661 if (!savedcmd)
1662 return -ENOMEM;
1663
1664 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1665 if (ret < 0) {
1666 kfree(savedcmd);
1667 savedcmd = NULL;
1668 return -ENOMEM;
1669 }
1670
1671 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001672}
1673
/*
 * is_tracing_stopped - report whether the global trace is stopped
 *
 * Returns the stop depth of global_trace: non-zero means tracing_stop()
 * has been called more times than tracing_start().
 */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1678
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Start/stop nest: only the outermost tracing_start() (stop_count
 * dropping to zero) re-enables the ring buffers.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		/* Still nested inside an outer stop: don't enable yet */
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1721
/*
 * tracing_start_tr - per-instance counterpart of tracing_start()
 * @tr: the trace array (instance) to restart
 *
 * For the global instance this delegates to tracing_start() so the max
 * buffer is handled too; otherwise only the instance's own buffer is
 * re-enabled when its stop_count nesting drops to zero.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1752
1753/**
1754 * tracing_stop - quick stop of the tracer
1755 *
1756 * Light weight way to stop tracing. Use in conjunction with
1757 * tracing_start.
1758 */
1759void tracing_stop(void)
1760{
1761 struct ring_buffer *buffer;
1762 unsigned long flags;
1763
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001764 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1765 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001766 goto out;
1767
Steven Rostedta2f80712010-03-12 19:56:00 -05001768 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001769 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001770
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001771 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001772 if (buffer)
1773 ring_buffer_record_disable(buffer);
1774
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001775#ifdef CONFIG_TRACER_MAX_TRACE
1776 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001777 if (buffer)
1778 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001779#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001780
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001781 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001782
Steven Rostedt0f048702008-11-05 16:05:44 -05001783 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001784 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1785}
1786
/*
 * tracing_stop_tr - per-instance counterpart of tracing_stop()
 * @tr: the trace array (instance) to stop
 *
 * For the global instance this delegates to tracing_stop() so the max
 * buffer is handled too; otherwise only the instance's own buffer is
 * disabled, and only on the first (outermost) stop.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1807
/* Forward declaration; presumably defined later in this file — not visible here. */
void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001809
/*
 * trace_save_cmdline - remember @tsk's comm in the saved-cmdlines cache
 * @tsk: the task whose pid/comm should be recorded
 *
 * Returns 1 if the comm was saved, 0 if it could not be (pid out of
 * range, or the cmdline lock was contended). Uses a trylock so the
 * scheduler hot path never spins here.
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* pid 0 (idle) and oversized pids are not cached */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next ring slot */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		/* Keep the two maps mutual inverses */
		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1852
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001853static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001854{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001855 unsigned map;
1856
Steven Rostedt4ca530852009-03-16 19:20:15 -04001857 if (!pid) {
1858 strcpy(comm, "<idle>");
1859 return;
1860 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001861
Steven Rostedt74bf4072010-01-25 15:11:53 -05001862 if (WARN_ON_ONCE(pid < 0)) {
1863 strcpy(comm, "<XXX>");
1864 return;
1865 }
1866
Steven Rostedt4ca530852009-03-16 19:20:15 -04001867 if (pid > PID_MAX_DEFAULT) {
1868 strcpy(comm, "<...>");
1869 return;
1870 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001871
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001872 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001873 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001874 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001875 else
1876 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001877}
1878
/*
 * trace_find_cmdline - locked wrapper around __trace_find_cmdline()
 * @pid:  pid to look up
 * @comm: output buffer (TASK_COMM_LEN bytes)
 *
 * Disables preemption before taking the raw cmdline lock (required:
 * arch_spin_lock does not disable preemption itself).
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1889
/*
 * tracing_record_cmdline - record @tsk's comm if recording is enabled
 * @tsk: task being scheduled
 *
 * Skips quickly when recording is disabled, tracing is off, or this
 * CPU has already saved a comm since the last commit (trace_cmdline_save
 * is rearmed in __buffer_unlock_commit()).
 */
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	/* Only clear the per-cpu flag if the save actually happened */
	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
1901
/*
 * tracing_generic_entry_update - fill the common fields of a trace entry
 * @entry: the entry header to populate
 * @flags: saved irq flags of the context being traced
 * @pc:    preempt_count of the context being traced
 *
 * Packs preempt depth, current pid and the irq/NMI/hardirq/softirq/
 * need-resched state bits into the entry. The context bits are derived
 * from @pc's NMI/HARDIRQ/SOFTIRQ sections.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		/* Arch cannot report irq state; mark it unsupported */
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001923
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001924static __always_inline void
1925trace_event_setup(struct ring_buffer_event *event,
1926 int type, unsigned long flags, int pc)
1927{
1928 struct trace_entry *ent = ring_buffer_event_data(event);
1929
1930 tracing_generic_entry_update(ent, flags, pc);
1931 ent->type = type;
1932}
1933
/*
 * Reserve @len bytes on @buffer and, on success, pre-fill the common
 * entry header via trace_event_setup(). Returns NULL if the reserve
 * failed (buffer full/disabled).
 */
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event = ring_buffer_lock_reserve(buffer, len);

	if (event)
		trace_event_setup(event, type, flags, pc);
	return event;
}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001948
/* Per-cpu scratch event page used when events are likely to be filtered out */
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
/* Per-cpu nesting count: non-zero means the scratch event is in use */
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
/* Reference count of users of the buffered-event machinery (under event_mutex) */
static int trace_buffered_event_ref;
1952
1953/**
1954 * trace_buffered_event_enable - enable buffering events
1955 *
1956 * When events are being filtered, it is quicker to use a temporary
1957 * buffer to write the event data into if there's a likely chance
1958 * that it will not be committed. The discard of the ring buffer
1959 * is not as fast as committing, and is much slower than copying
1960 * a commit.
1961 *
1962 * When an event is to be filtered, allocate per cpu buffers to
1963 * write the event data into, and if the event is filtered and discarded
1964 * it is simply dropped, otherwise, the entire data is to be committed
1965 * in one shot.
1966 */
1967void trace_buffered_event_enable(void)
1968{
1969 struct ring_buffer_event *event;
1970 struct page *page;
1971 int cpu;
1972
1973 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1974
1975 if (trace_buffered_event_ref++)
1976 return;
1977
1978 for_each_tracing_cpu(cpu) {
1979 page = alloc_pages_node(cpu_to_node(cpu),
1980 GFP_KERNEL | __GFP_NORETRY, 0);
1981 if (!page)
1982 goto failed;
1983
1984 event = page_address(page);
1985 memset(event, 0, sizeof(*event));
1986
1987 per_cpu(trace_buffered_event, cpu) = event;
1988
1989 preempt_disable();
1990 if (cpu == smp_processor_id() &&
1991 this_cpu_read(trace_buffered_event) !=
1992 per_cpu(trace_buffered_event, cpu))
1993 WARN_ON_ONCE(1);
1994 preempt_enable();
1995 }
1996
1997 return;
1998 failed:
1999 trace_buffered_event_disable();
2000}
2001
/* IPI callback: drop this CPU's "in use" count, re-enabling the scratch event */
static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}
2008
/* IPI callback: mark this CPU's scratch event busy so writers bypass it */
static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}
2013
2014/**
2015 * trace_buffered_event_disable - disable buffering events
2016 *
2017 * When a filter is removed, it is faster to not use the buffered
2018 * events, and to commit directly into the ring buffer. Free up
2019 * the temp buffers when there are no more users. This requires
2020 * special synchronization with current events.
2021 */
2022void trace_buffered_event_disable(void)
2023{
2024 int cpu;
2025
2026 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2027
2028 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2029 return;
2030
2031 if (--trace_buffered_event_ref)
2032 return;
2033
2034 preempt_disable();
2035 /* For each CPU, set the buffer as used. */
2036 smp_call_function_many(tracing_buffer_mask,
2037 disable_trace_buffered_event, NULL, 1);
2038 preempt_enable();
2039
2040 /* Wait for all current users to finish */
2041 synchronize_sched();
2042
2043 for_each_tracing_cpu(cpu) {
2044 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2045 per_cpu(trace_buffered_event, cpu) = NULL;
2046 }
2047 /*
2048 * Make sure trace_buffered_event is NULL before clearing
2049 * trace_buffered_event_cnt.
2050 */
2051 smp_wmb();
2052
2053 preempt_disable();
2054 /* Do the work on each cpu */
2055 smp_call_function_many(tracing_buffer_mask,
2056 enable_trace_buffered_event, NULL, 1);
2057 preempt_enable();
2058}
2059
/*
 * __buffer_unlock_commit - commit a reserved event to the ring buffer
 * @buffer: the ring buffer the event was reserved on
 * @event:  the event to commit
 *
 * Rearms the per-cpu trace_cmdline_save flag so the next sched event
 * records a comm again. If @event is this CPU's buffered (scratch)
 * event, the data is copied into the real buffer with a full write and
 * the scratch slot is released; otherwise it is a normal commit.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
2074
/* Throwaway ring buffer used so triggers can inspect events while tracing is off */
static struct ring_buffer *temp_buffer;
2076
Steven Rostedtef5580d2009-02-27 19:38:04 -05002077struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002078trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002079 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002080 int type, unsigned long len,
2081 unsigned long flags, int pc)
2082{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002083 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002084 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002085
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002086 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002087
2088 if ((trace_file->flags &
2089 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2090 (entry = this_cpu_read(trace_buffered_event))) {
2091 /* Try to use the per cpu buffer first */
2092 val = this_cpu_inc_return(trace_buffered_event_cnt);
2093 if (val == 1) {
2094 trace_event_setup(entry, type, flags, pc);
2095 entry->array[0] = len;
2096 return entry;
2097 }
2098 this_cpu_dec(trace_buffered_event_cnt);
2099 }
2100
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002101 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002102 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002103 /*
2104 * If tracing is off, but we have triggers enabled
2105 * we still need to look at the event data. Use the temp_buffer
2106 * to store the trace event for the tigger to use. It's recusive
2107 * safe and will not be recorded anywhere.
2108 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002109 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002110 *current_rb = temp_buffer;
2111 entry = trace_buffer_lock_reserve(*current_rb,
2112 type, len, flags, pc);
2113 }
2114 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002115}
2116EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2117
/*
 * trace_buffer_unlock_commit_regs - commit an event plus stack/userstack
 * @tr:     trace array whose flags gate the stack traces
 * @buffer: ring buffer the event lives in
 * @event:  the event to commit
 * @flags, @pc: context of the caller
 * @regs:   optional pt_regs; when set, the stack walk starts from them
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002139
/*
 * trace_function - record a function-entry event into @tr's buffer
 * @tr:        trace array to record into
 * @ip:        address of the traced function
 * @parent_ip: address of its caller
 * @flags, @pc: context of the caller for the common entry fields
 *
 * The event is dropped silently if the buffer cannot reserve space or
 * the event filter discards it.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
2161
#ifdef CONFIG_STACKTRACE

/* Max entries that fit in the per-cpu one-page deep-stack scratch area */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

/* Per-cpu scratch area for deep stack walks, guarded by the reserve count */
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2171
/*
 * __ftrace_trace_stack - record a kernel stack trace event
 * @buffer: ring buffer to record into
 * @flags, @pc: context of the caller
 * @skip:  number of frames to skip off the top of the trace
 * @regs:  optional pt_regs to walk from instead of the current stack
 *
 * First tries the per-cpu ftrace_stack scratch area so the walk can go
 * deeper than the fixed stack_entry; falls back to walking directly
 * into the reserved event if the scratch area is already in use (e.g.
 * we recursed from an interrupt/NMI).
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* We own the scratch area: do the deep walk there first */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		/* Copy the deep walk out of the scratch area */
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Scratch busy: walk directly into the event's own space */
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
2259
/*
 * Record a kernel stack trace into @buffer, but only when the trace
 * array @tr has the stacktrace option set.  Thin guard around
 * __ftrace_trace_stack().
 */
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
2270
/*
 * Unconditionally record a kernel stack trace into @tr's main buffer.
 * Unlike ftrace_trace_stack(), this does not consult TRACE_ITER_STACKTRACE.
 */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
2276
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 *
 * Writes the current kernel stack trace into the global trace buffer.
 * Safe to call from most contexts; does nothing while tracing is
 * disabled or the tracer self tests are running.
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
2298
/* Per-cpu recursion guard: non-zero while a user stack trace is in flight. */
static DEFINE_PER_CPU(int, user_stack_count);

/*
 * Record the current task's *user-space* stack trace into @buffer.
 * Only active when the global userstacktrace option is set.  The
 * per-cpu user_stack_count guard prevents recursion, since saving a
 * user stack may itself trigger kernel events (e.g. page faults).
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	/* Zero first so short traces don't leak stale ring buffer data. */
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
2352
#ifdef UNUSED
/*
 * Dead code, compiled out (UNUSED is never defined).
 * NOTE(review): this call no longer matches ftrace_trace_userstack()'s
 * current (buffer, flags, pc) signature — confirm before resurrecting.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002359
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002360#endif /* CONFIG_STACKTRACE */
2361
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;	/* depth of nested trace_printk() use on this CPU */
	char buffer[4][TRACE_BUF_SIZE];	/* one scratch buffer per depth */
};

/* Per-cpu scratch buffers for trace_printk(); NULL until first allocated. */
static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002369
2370/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002371 * Thise allows for lockless recording. If we're nested too deeply, then
2372 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002373 */
2374static char *get_trace_buf(void)
2375{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002376 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002377
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002378 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002379 return NULL;
2380
Steven Rostedt (VMware)96cf9182017-09-05 11:32:01 -04002381 buffer->nesting++;
2382
2383 /* Interrupts must see nesting incremented before we use the buffer */
2384 barrier();
2385 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002386}
2387
/* Release the buffer handed out by get_trace_buf() on this CPU. */
static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}
2394
/*
 * Allocate the per-cpu trace_printk() scratch buffers.
 * Returns 0 on success, -ENOMEM (with a warning) on failure.
 */
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}
2406
/* Set once the trace_printk() per-cpu buffers have been allocated. */
static int buffers_allocated;

/*
 * Allocate the buffers trace_printk() needs and warn loudly that a
 * debug facility is in use.  Idempotent; called at boot and when a
 * module using trace_printk() is loaded.
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2448
2449void trace_printk_start_comm(void)
2450{
2451 /* Start tracing comms if trace printk is set */
2452 if (!buffers_allocated)
2453 return;
2454 tracing_start_cmdline_record();
2455}
2456
2457static void trace_printk_start_stop_comm(int enabled)
2458{
2459 if (!buffers_allocated)
2460 return;
2461
2462 if (enabled)
2463 tracing_start_cmdline_record();
2464 else
2465 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002466}
2467
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:   instruction pointer to record with the event
 * @fmt:  printf format string (pointer is stored, not copied)
 * @args: arguments for @fmt, packed in binary via vbin_printf()
 *
 * Fast path for trace_printk(): arguments are stored in binary form and
 * only formatted when the trace is read.  Returns the number of u32
 * words written, or 0 if tracing is off or no scratch buffer is free.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Bail if the binary-packed args overflowed the scratch buffer. */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2530
/*
 * Format @fmt/@args into a per-cpu scratch buffer and commit the
 * resulting string as a TRACE_PRINT event into @buffer.
 * Returns the formatted length, or 0 if tracing is off or no
 * scratch buffer is available.
 */
__printf(3, 0)
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	/* +1 for the string's NUL terminator, copied below. */
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002585
/* vprintf-style write into the given trace array's main buffer. */
__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}
2592
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002593__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002594int trace_array_printk(struct trace_array *tr,
2595 unsigned long ip, const char *fmt, ...)
2596{
2597 int ret;
2598 va_list ap;
2599
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002601 return 0;
2602
2603 va_start(ap, fmt);
2604 ret = trace_array_vprintk(tr, ip, fmt, ap);
2605 va_end(ap);
2606 return ret;
2607}
2608
/* printf-style write into an explicit ring buffer (not a trace array). */
__printf(3, 4)
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}
2624
/* vprintf-style write into the global trace buffer. */
__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
2631
/* Advance the iterator one entry, consuming it from this CPU's reader. */
static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}
2640
/*
 * Peek at the next entry for @cpu without consuming it.  Uses the
 * per-cpu iterator when one exists (static read), otherwise peeks the
 * live buffer.  Fills @ts and, for live reads, @lost_events; also
 * records the entry's size in iter->ent_size (0 when empty).
 */
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
Steven Rostedtd7690412008-10-01 00:29:53 -04002661
/*
 * Find the chronologically next entry across all CPU buffers (or the
 * single buffer when the iterator is bound to one CPU).  Returns the
 * entry and optionally its cpu, timestamp and lost-event count; also
 * leaves iter->ent_size set to the winning entry's size.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set ent_size for this candidate */
			next_size = iter->ent_size;
		}
	}

	/* Restore the size of the entry that actually won. */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2721
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002728
/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	/* Returns the iterator itself (not the entry) for seq_file use. */
	return iter->ent ? iter : NULL;
}
2740
/* Destructively consume the next event on the iterator's current CPU. */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
2746
/*
 * seq_file ->next(): advance the trace iterator until its index
 * reaches *pos.  Returns the iterator as the cursor, or NULL at EOF
 * (iteration cannot go backwards).
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2773
/*
 * Rewind @cpu's per-cpu iterator to the start of the buffer and skip
 * (while counting) any entries stamped before the buffer's logical
 * start time; the count is stored in skipped_entries for accounting.
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2803
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* A snapshot of a max-latency tracer cannot be read live. */
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* Pause cmdline recording while a non-snapshot read is active. */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* Restart from the top: reset and walk forward to *pos. */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	/* Held until s_stop(); guards readers against concurrent writers. */
	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2868
/* seq_file ->stop(): undo the locks and counters taken in s_start(). */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* s_start() returned EBUSY for this case and took no locks. */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2884
/*
 * Sum entry counts across all CPUs: @entries gets the events still
 * readable, @total additionally includes overwritten (overrun) events.
 */
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
2912
/*
 * Print the column legend used by the latency output format:
 * CPU number, irqs-off, need-resched, hardirq/softirq, preempt-depth
 * and the delay marker, above the cmd/pid/time/caller columns.
 */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n"
		 "# / _-----=> irqs-off \n"
		 "# | / _----=> need-resched \n"
		 "# || / _---=> hardirq/softirq \n"
		 "# ||| / _--=> preempt-depth \n"
		 "# |||| / delay \n"
		 "# cmd pid ||||| time | caller \n"
		 "# \\ / ||||| \\ | / \n");
}
2924
/*
 * Print the entries-in-buffer/entries-written summary line that
 * precedes the non-latency headers.  "entries" is what is still in the
 * buffer; "written" additionally counts overwritten entries (see
 * get_total_entries()).
 */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2935
/* Column header for the default (no irq-info) trace output format */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
		 "# | | | | |\n");
}
2942
/*
 * Column header for the default output format when TRACE_ITER_IRQ_INFO
 * is set: adds the irqs-off/need-resched/hardirq/preempt-depth legend.
 */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		 "# / _----=> need-resched\n"
		 "# | / _---=> hardirq/softirq\n"
		 "# || / _--=> preempt-depth\n"
		 "# ||| / delay\n"
		 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
		 "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002954
Jiri Olsa62b915f2010-04-02 19:01:22 +02002955void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002956print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2957{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002958 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002959 struct trace_buffer *buf = iter->trace_buffer;
2960 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002961 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002962 unsigned long entries;
2963 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002964 const char *name = "preemption";
2965
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002966 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002967
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002968 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002969
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002970 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002971 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002972 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002973 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002974 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002975 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002976 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002977 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002978 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002979 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002980#if defined(CONFIG_PREEMPT_NONE)
2981 "server",
2982#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2983 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002984#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985 "preempt",
2986#else
2987 "unknown",
2988#endif
2989 /* These are reserved for later use */
2990 0, 0, 0, 0);
2991#ifdef CONFIG_SMP
2992 seq_printf(m, " #P:%d)\n", num_online_cpus());
2993#else
2994 seq_puts(m, ")\n");
2995#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002996 seq_puts(m, "# -----------------\n");
2997 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002998 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002999 data->comm, data->pid,
3000 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003001 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003002 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003
3004 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003005 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003006 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3007 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003008 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003009 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3010 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003011 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003012 }
3013
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003014 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003015}
3016
/*
 * Emit a "##### CPU N buffer started ####" marker the first time an
 * entry from a given cpu buffer is printed, so readers can tell where
 * a cpu's data begins after an overrun.  No-op unless annotation is
 * enabled both globally and for this iterator.
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	/* already announced this cpu */
	if (cpumask_available(iter->started) &&
	    cpumask_test_cpu(iter->cpu, iter->started))
		return;

	/*
	 * skipped_entries means the whole buffer was kept (see
	 * get_total_entries()), so there is no gap to flag.
	 */
	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (cpumask_available(iter->started))
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
3043
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003044static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003045{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003046 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003047 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003048 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003049 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003050 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003051
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003052 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003053
Steven Rostedta3097202008-11-07 22:36:02 -05003054 test_cpu_buff_start(iter);
3055
Steven Rostedtf633cef2008-12-23 23:24:13 -05003056 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003057
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003058 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003059 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3060 trace_print_lat_context(iter);
3061 else
3062 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003063 }
3064
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003065 if (trace_seq_has_overflowed(s))
3066 return TRACE_TYPE_PARTIAL_LINE;
3067
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003068 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003069 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003070
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003071 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003072
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003073 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003074}
3075
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003076static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003077{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003078 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003079 struct trace_seq *s = &iter->seq;
3080 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003081 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003082
3083 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003084
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003085 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003086 trace_seq_printf(s, "%d %d %llu ",
3087 entry->pid, iter->cpu, iter->ts);
3088
3089 if (trace_seq_has_overflowed(s))
3090 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003091
Steven Rostedtf633cef2008-12-23 23:24:13 -05003092 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003093 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003094 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003095
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003096 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003097
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003098 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003099}
3100
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003101static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003102{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003103 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003104 struct trace_seq *s = &iter->seq;
3105 unsigned char newline = '\n';
3106 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003107 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003108
3109 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003110
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003111 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003112 SEQ_PUT_HEX_FIELD(s, entry->pid);
3113 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3114 SEQ_PUT_HEX_FIELD(s, iter->ts);
3115 if (trace_seq_has_overflowed(s))
3116 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003117 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003118
Steven Rostedtf633cef2008-12-23 23:24:13 -05003119 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003120 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003121 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003122 if (ret != TRACE_TYPE_HANDLED)
3123 return ret;
3124 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003125
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003126 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003127
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003128 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003129}
3130
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003131static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003132{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003133 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003134 struct trace_seq *s = &iter->seq;
3135 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003136 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003137
3138 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003139
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003140 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003141 SEQ_PUT_FIELD(s, entry->pid);
3142 SEQ_PUT_FIELD(s, iter->cpu);
3143 SEQ_PUT_FIELD(s, iter->ts);
3144 if (trace_seq_has_overflowed(s))
3145 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003146 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003147
Steven Rostedtf633cef2008-12-23 23:24:13 -05003148 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003149 return event ? event->funcs->binary(iter, 0, event) :
3150 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003151}
3152
Jiri Olsa62b915f2010-04-02 19:01:22 +02003153int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003154{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003155 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156 int cpu;
3157
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003158 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003159 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003160 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003161 buf_iter = trace_buffer_iter(iter, cpu);
3162 if (buf_iter) {
3163 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003164 return 0;
3165 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003166 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003167 return 0;
3168 }
3169 return 1;
3170 }
3171
Steven Rostedtab464282008-05-12 21:21:00 +02003172 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003173 buf_iter = trace_buffer_iter(iter, cpu);
3174 if (buf_iter) {
3175 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003176 return 0;
3177 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003178 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003179 return 0;
3180 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003181 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003182
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003183 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003184}
3185
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	/*
	 * Dispatch order matters: report lost events first, let the
	 * tracer override the whole line next, then the printk
	 * msg-only shortcuts, then bin/hex/raw, default format last.
	 */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		/* UNHANDLED means fall through to the generic formats */
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
3232
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003233void trace_latency_header(struct seq_file *m)
3234{
3235 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003236 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003237
3238 /* print nothing if the buffers are empty */
3239 if (trace_empty(iter))
3240 return;
3241
3242 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3243 print_trace_header(m, iter);
3244
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003245 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003246 print_lat_help_header(m);
3247}
3248
Jiri Olsa62b915f2010-04-02 19:01:22 +02003249void trace_default_header(struct seq_file *m)
3250{
3251 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003252 struct trace_array *tr = iter->tr;
3253 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003254
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003255 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3256 return;
3257
Jiri Olsa62b915f2010-04-02 19:01:22 +02003258 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3259 /* print nothing if the buffers are empty */
3260 if (trace_empty(iter))
3261 return;
3262 print_trace_header(m, iter);
3263 if (!(trace_flags & TRACE_ITER_VERBOSE))
3264 print_lat_help_header(m);
3265 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003266 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3267 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003268 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003269 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003270 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003271 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003272 }
3273}
3274
/* Warn in the output when function tracing was killed by an earlier bug */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		 "# MAY BE MISSING FUNCTION EVENTS\n");
}
3282
#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text shown when reading an empty top-level "snapshot" file */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		 "# Takes a snapshot of the main buffer.\n"
		 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		 "# (Doesn't have to be '2' works with any number that\n"
		 "# is not a '0' or '1')\n");
}

/*
 * Usage text for the per-cpu snapshot files.  Taking a per-cpu
 * snapshot requires CONFIG_RING_BUFFER_ALLOW_SWAP.
 */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		 "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		 "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		 "# (Doesn't have to be '2' works with any number that\n"
		 "# is not a '0' or '1')\n");
}

/*
 * Print the snapshot allocation state plus the usage help matching
 * this file (top-level vs per-cpu).  Called from s_show() when an
 * empty snapshot file is read.
 */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
3326
/*
 * seq_file ->show() callback: emit one unit of output.
 *
 * Three cases:
 *  - iter->ent == NULL: at the head of the output; print the tracer
 *    banner and the appropriate header (snapshot help, the tracer's
 *    own header, or the default one).
 *  - iter->leftover: the seq_file buffer overflowed on the previous
 *    attempt, so the already-formatted trace_seq is flushed again.
 *  - otherwise: format the current entry and copy it out.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3370
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003371/*
3372 * Should be used after trace_array_get(), trace_types_lock
3373 * ensures that i_cdev was already initialized.
3374 */
3375static inline int tracing_get_cpu(struct inode *inode)
3376{
3377 if (inode->i_cdev) /* See trace_create_cpu_file() */
3378 return (long)inode->i_cdev - 1;
3379 return RING_BUFFER_ALL_CPUS;
3380}
3381
/* seq_file operations used by the trace file readers (__tracing_open()) */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
3388
/*
 * Common open path for the trace and snapshot files.
 *
 * Allocates and initializes a trace_iterator attached to the seq_file,
 * snapshots the current tracer (so concurrent tracer switches do not
 * affect this reader), selects the main or max buffer, stops tracing
 * for live reads, and prepares ring buffer iterators for either the
 * single requested cpu or all cpus.
 *
 * Returns the iterator, or an ERR_PTR on failure.  On the error paths
 * everything allocated so far is unwound (goto fail/release).
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	/* tracks which cpu buffers have been announced; see test_cpu_buff_start() */
	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		/* prepare all iterators first, then sync, then start reads */
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3485
3486int tracing_open_generic(struct inode *inode, struct file *filp)
3487{
Steven Rostedt60a11772008-05-12 21:20:44 +02003488 if (tracing_disabled)
3489 return -ENODEV;
3490
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491 filp->private_data = inode->i_private;
3492 return 0;
3493}
3494
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003495bool tracing_is_disabled(void)
3496{
3497 return (tracing_disabled) ? true: false;
3498}
3499
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003500/*
3501 * Open and update trace_array ref count.
3502 * Must have the current trace_array passed to it.
3503 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003504static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003505{
3506 struct trace_array *tr = inode->i_private;
3507
3508 if (tracing_disabled)
3509 return -ENODEV;
3510
3511 if (trace_array_get(tr) < 0)
3512 return -ENODEV;
3513
3514 filp->private_data = inode->i_private;
3515
3516 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003517}
3518
/*
 * tracing_release - ->release for the "trace" file
 * @inode: inode holding the trace_array in i_private
 * @file: file being closed
 *
 * Undoes __tracing_open(): finishes the per-cpu ring buffer iterators,
 * restarts tracing if this open had stopped it, drops the trace_array
 * reference taken at open time and frees the iterator state.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	/* A write-only open took only the trace_array ref; no iterator exists. */
	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	/* Give the tracer a chance to clean up state tied to this iterator. */
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* Drop the open-time reference while still under trace_types_lock. */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3559
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003560static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3561{
3562 struct trace_array *tr = inode->i_private;
3563
3564 trace_array_put(tr);
3565 return 0;
3566}
3567
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003568static int tracing_single_release_tr(struct inode *inode, struct file *file)
3569{
3570 struct trace_array *tr = inode->i_private;
3571
3572 trace_array_put(tr);
3573
3574 return single_release(inode, file);
3575}
3576
/*
 * tracing_open - ->open for the "trace" file
 *
 * A write-mode open with O_TRUNC erases the buffer contents first
 * (the max buffer instead, when the current tracer tracks a maximum).
 * A read-mode open builds the seq_file iterator via __tracing_open().
 * On success the trace_array reference is dropped by tracing_release().
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* On failure there is no release callback coming; drop the ref now. */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3615
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003616/*
3617 * Some tracers are not suitable for instance buffers.
3618 * A tracer is always available for the global array (toplevel)
3619 * or if it explicitly states that it is.
3620 */
3621static bool
3622trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3623{
3624 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3625}
3626
3627/* Find the next tracer that this trace array may use */
3628static struct tracer *
3629get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3630{
3631 while (t && !trace_ok_for_array(t, tr))
3632 t = t->next;
3633
3634 return t;
3635}
3636
Ingo Molnare309b412008-05-12 21:20:51 +02003637static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003638t_next(struct seq_file *m, void *v, loff_t *pos)
3639{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003640 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003641 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003642
3643 (*pos)++;
3644
3645 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003646 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003647
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003648 return t;
3649}
3650
3651static void *t_start(struct seq_file *m, loff_t *pos)
3652{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003653 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003654 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003655 loff_t l = 0;
3656
3657 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003658
3659 t = get_tracer_for_array(tr, trace_types);
3660 for (; t && l < *pos; t = t_next(m, t, &l))
3661 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003662
3663 return t;
3664}
3665
3666static void t_stop(struct seq_file *m, void *p)
3667{
3668 mutex_unlock(&trace_types_lock);
3669}
3670
3671static int t_show(struct seq_file *m, void *v)
3672{
3673 struct tracer *t = v;
3674
3675 if (!t)
3676 return 0;
3677
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003678 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003679 if (t->next)
3680 seq_putc(m, ' ');
3681 else
3682 seq_putc(m, '\n');
3683
3684 return 0;
3685}
3686
/* seq_file iteration over the registered tracer list ("available_tracers"). */
static const struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
3693
3694static int show_traces_open(struct inode *inode, struct file *file)
3695{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003696 struct trace_array *tr = inode->i_private;
3697 struct seq_file *m;
3698 int ret;
3699
Steven Rostedt60a11772008-05-12 21:20:44 +02003700 if (tracing_disabled)
3701 return -ENODEV;
3702
Steven Rostedt (VMware)5e402ba2019-10-11 18:19:17 -04003703 if (trace_array_get(tr) < 0)
3704 return -ENODEV;
3705
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003706 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)5e402ba2019-10-11 18:19:17 -04003707 if (ret) {
3708 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003709 return ret;
Steven Rostedt (VMware)5e402ba2019-10-11 18:19:17 -04003710 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003711
3712 m = file->private_data;
3713 m->private = tr;
3714
3715 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003716}
3717
Steven Rostedt (VMware)5e402ba2019-10-11 18:19:17 -04003718static int show_traces_release(struct inode *inode, struct file *file)
3719{
3720 struct trace_array *tr = inode->i_private;
3721
3722 trace_array_put(tr);
3723 return seq_release(inode, file);
3724}
3725
/*
 * Writes to "trace" only truncate the buffer (handled in tracing_open());
 * accept and discard the data so the write syscall reports success.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3732
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003733loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003734{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003735 int ret;
3736
Slava Pestov364829b2010-11-24 15:13:16 -08003737 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003738 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003739 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003740 file->f_pos = ret = 0;
3741
3742 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003743}
3744
/* File operations for the "trace" file (read dumps buffer, write truncates). */
static const struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.write = tracing_write_stub,
	.llseek = tracing_lseek,
	.release = tracing_release,
};
3752
/* File operations for "available_tracers" (seq_file listing of tracers). */
static const struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = show_traces_release,
};
3759
3760static ssize_t
3761tracing_cpumask_read(struct file *filp, char __user *ubuf,
3762 size_t count, loff_t *ppos)
3763{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003764 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Dud760f902017-11-30 11:39:43 +08003765 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003766 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003767
Changbin Dud760f902017-11-30 11:39:43 +08003768 len = snprintf(NULL, 0, "%*pb\n",
3769 cpumask_pr_args(tr->tracing_cpumask)) + 1;
3770 mask_str = kmalloc(len, GFP_KERNEL);
3771 if (!mask_str)
3772 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003773
Changbin Dud760f902017-11-30 11:39:43 +08003774 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08003775 cpumask_pr_args(tr->tracing_cpumask));
3776 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003777 count = -EINVAL;
3778 goto out_err;
3779 }
Changbin Dud760f902017-11-30 11:39:43 +08003780 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003781
3782out_err:
Changbin Dud760f902017-11-30 11:39:43 +08003783 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003784
3785 return count;
3786}
3787
/*
 * Write handler for "tracing_cpumask": parse a cpumask from userspace
 * and apply it to the instance.  Each flipped bit adjusts the per-cpu
 * disabled counter and the ring buffer record-enable state; this is
 * done under tr->max_lock with IRQs off so the transition is atomic
 * with respect to tracing.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3834
/* File operations for "tracing_cpumask" (per-instance CPU filter). */
static const struct file_operations tracing_cpumask_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_cpumask_read,
	.write = tracing_cpumask_write,
	.release = tracing_release_generic_tr,
	.llseek = generic_file_llseek,
};
3842
Li Zefanfdb372e2009-12-08 11:15:59 +08003843static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003844{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003845 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003846 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003847 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003848 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003849
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003850 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003851 tracer_flags = tr->current_trace->flags->val;
3852 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003853
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003854 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003855 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003856 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003857 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003858 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003859 }
3860
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003861 for (i = 0; trace_opts[i].name; i++) {
3862 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003863 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003864 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003865 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003866 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003867 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003868
Li Zefanfdb372e2009-12-08 11:15:59 +08003869 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003870}
3871
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003872static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003873 struct tracer_flags *tracer_flags,
3874 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003875{
Chunyu Hud39cdd22016-03-08 21:37:01 +08003876 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003877 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003878
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003879 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003880 if (ret)
3881 return ret;
3882
3883 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003884 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003885 else
Zhaolei77708412009-08-07 18:53:21 +08003886 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003887 return 0;
3888}
3889
Li Zefan8d18eaa2009-12-08 11:17:06 +08003890/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003891static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003892{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003893 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003894 struct tracer_flags *tracer_flags = trace->flags;
3895 struct tracer_opt *opts = NULL;
3896 int i;
3897
3898 for (i = 0; tracer_flags->opts[i].name; i++) {
3899 opts = &tracer_flags->opts[i];
3900
3901 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003902 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003903 }
3904
3905 return -EINVAL;
3906}
3907
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003908/* Some tracers require overwrite to stay enabled */
3909int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3910{
3911 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3912 return -1;
3913
3914 return 0;
3915}
3916
/*
 * set_tracer_flag - set or clear one TRACE_ITER_* flag on @tr
 * @mask: a single TRACE_ITER_* bit
 * @enabled: non-zero to set the flag, zero to clear it
 *
 * Returns 0 on success (or no-op), -EINVAL if the current tracer
 * vetoes the change via its flag_changed() callback.  Several flags
 * need their new state propagated to other subsystems; that fan-out
 * happens after the flag word is updated.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	/* Overwrite mode must be pushed down into the ring buffer(s). */
	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3953
/*
 * trace_set_options - apply one option string ("opt" or "noopt") to @tr
 *
 * Tries the global trace_options[] names first and falls back to the
 * current tracer's private options.  Returns 0 on success or a
 * negative errno (-ENODEV if nothing matched and the tracer rejects it).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	/* A "no" prefix means the option is being cleared, not set. */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
3993
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003994static void __init apply_trace_boot_options(void)
3995{
3996 char *buf = trace_boot_options_buf;
3997 char *option;
3998
3999 while (true) {
4000 option = strsep(&buf, ",");
4001
4002 if (!option)
4003 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004004
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004005 if (*option)
4006 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004007
4008 /* Put back the comma to allow this to be called again */
4009 if (buf)
4010 *(buf - 1) = ',';
4011 }
4012}
4013
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004014static ssize_t
4015tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4016 size_t cnt, loff_t *ppos)
4017{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004018 struct seq_file *m = filp->private_data;
4019 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004020 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004021 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004022
4023 if (cnt >= sizeof(buf))
4024 return -EINVAL;
4025
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004026 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004027 return -EFAULT;
4028
Steven Rostedta8dd2172013-01-09 20:54:17 -05004029 buf[cnt] = 0;
4030
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004031 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004032 if (ret < 0)
4033 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004034
Jiri Olsacf8517c2009-10-23 19:36:16 -04004035 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004036
4037 return cnt;
4038}
4039
Li Zefanfdb372e2009-12-08 11:15:59 +08004040static int tracing_trace_options_open(struct inode *inode, struct file *file)
4041{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004042 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004043 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004044
Li Zefanfdb372e2009-12-08 11:15:59 +08004045 if (tracing_disabled)
4046 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004047
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004048 if (trace_array_get(tr) < 0)
4049 return -ENODEV;
4050
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004051 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4052 if (ret < 0)
4053 trace_array_put(tr);
4054
4055 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004056}
4057
/* File operations for "trace_options" (show all options, write one). */
static const struct file_operations tracing_iter_fops = {
	.open = tracing_trace_options_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_single_release_tr,
	.write = tracing_trace_options_write,
};
4065
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004066static const char readme_msg[] =
4067 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004068 "# echo 0 > tracing_on : quick way to disable tracing\n"
4069 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4070 " Important files:\n"
4071 " trace\t\t\t- The static contents of the buffer\n"
4072 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4073 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4074 " current_tracer\t- function and latency tracers\n"
4075 " available_tracers\t- list of configured tracers for current_tracer\n"
4076 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4077 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4078 " trace_clock\t\t-change the clock used to order events\n"
4079 " local: Per cpu clock but may not be synced across CPUs\n"
4080 " global: Synced across CPUs but slows tracing down.\n"
4081 " counter: Not a clock, but just an increment\n"
4082 " uptime: Jiffy counter from time of boot\n"
4083 " perf: Same clock that perf events use\n"
4084#ifdef CONFIG_X86_64
4085 " x86-tsc: TSC cycle counter\n"
4086#endif
4087 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4088 " tracing_cpumask\t- Limit which CPUs to trace\n"
4089 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4090 "\t\t\t Remove sub-buffer with rmdir\n"
4091 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004092 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4093 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004094 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004095#ifdef CONFIG_DYNAMIC_FTRACE
4096 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004097 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4098 "\t\t\t functions\n"
4099 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4100 "\t modules: Can select a group via module\n"
4101 "\t Format: :mod:<module-name>\n"
4102 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4103 "\t triggers: a command to perform when function is hit\n"
4104 "\t Format: <function>:<trigger>[:count]\n"
4105 "\t trigger: traceon, traceoff\n"
4106 "\t\t enable_event:<system>:<event>\n"
4107 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004108#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004109 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004110#endif
4111#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004112 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004113#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004114 "\t\t dump\n"
4115 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004116 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4117 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4118 "\t The first one will disable tracing every time do_fault is hit\n"
4119 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4120 "\t The first time do trap is hit and it disables tracing, the\n"
4121 "\t counter will decrement to 2. If tracing is already disabled,\n"
4122 "\t the counter will not decrement. It only decrements when the\n"
4123 "\t trigger did work\n"
4124 "\t To remove trigger without count:\n"
4125 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4126 "\t To remove trigger with a count:\n"
4127 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004128 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004129 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4130 "\t modules: Can select a group via module command :mod:\n"
4131 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004132#endif /* CONFIG_DYNAMIC_FTRACE */
4133#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004134 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4135 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004136#endif
4137#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4138 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004139 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004140 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4141#endif
4142#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004143 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4144 "\t\t\t snapshot buffer. Read the contents for more\n"
4145 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004146#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004147#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004148 " stack_trace\t\t- Shows the max stack trace when active\n"
4149 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004150 "\t\t\t Write into this file to reset the max size (trigger a\n"
4151 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004152#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004153 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4154 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004155#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004156#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004157#ifdef CONFIG_KPROBE_EVENT
4158 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4159 "\t\t\t Write into this file to define/undefine new trace events.\n"
4160#endif
4161#ifdef CONFIG_UPROBE_EVENT
4162 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4163 "\t\t\t Write into this file to define/undefine new trace events.\n"
4164#endif
4165#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4166 "\t accepts: event-definitions (one definition per line)\n"
4167 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4168 "\t -:[<group>/]<event>\n"
4169#ifdef CONFIG_KPROBE_EVENT
4170 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4171#endif
4172#ifdef CONFIG_UPROBE_EVENT
4173 "\t place: <path>:<offset>\n"
4174#endif
4175 "\t args: <name>=fetcharg[:type]\n"
4176 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4177 "\t $stack<index>, $stack, $retval, $comm\n"
4178 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4179 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4180#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004181 " events/\t\t- Directory containing all trace event subsystems:\n"
4182 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4183 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004184 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4185 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004186 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004187 " events/<system>/<event>/\t- Directory containing control files for\n"
4188 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004189 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4190 " filter\t\t- If set, only events passing filter are traced\n"
4191 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004192 "\t Format: <trigger>[:count][if <filter>]\n"
4193 "\t trigger: traceon, traceoff\n"
4194 "\t enable_event:<system>:<event>\n"
4195 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004196#ifdef CONFIG_HIST_TRIGGERS
4197 "\t enable_hist:<system>:<event>\n"
4198 "\t disable_hist:<system>:<event>\n"
4199#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004200#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004201 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004202#endif
4203#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004204 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004205#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004206#ifdef CONFIG_HIST_TRIGGERS
4207 "\t\t hist (see below)\n"
4208#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004209 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4210 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4211 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4212 "\t events/block/block_unplug/trigger\n"
4213 "\t The first disables tracing every time block_unplug is hit.\n"
4214 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4215 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4216 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4217 "\t Like function triggers, the counter is only decremented if it\n"
4218 "\t enabled or disabled tracing.\n"
4219 "\t To remove a trigger without a count:\n"
4220 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4221 "\t To remove a trigger with a count:\n"
4222 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4223 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004224#ifdef CONFIG_HIST_TRIGGERS
4225 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004226 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004227 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004228 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004229 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004230 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004231 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004232 "\t [if <filter>]\n\n"
4233 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004234 "\t table using the key(s) and value(s) named, and the value of a\n"
4235 "\t sum called 'hitcount' is incremented. Keys and values\n"
4236 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004237 "\t can be any field, or the special string 'stacktrace'.\n"
4238 "\t Compound keys consisting of up to two fields can be specified\n"
4239 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4240 "\t fields. Sort keys consisting of up to two fields can be\n"
4241 "\t specified using the 'sort' keyword. The sort direction can\n"
4242 "\t be modified by appending '.descending' or '.ascending' to a\n"
4243 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004244 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4245 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4246 "\t its histogram data will be shared with other triggers of the\n"
4247 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004248 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004249 "\t table in its entirety to stdout. If there are multiple hist\n"
4250 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004251 "\t trigger in the output. The table displayed for a named\n"
4252 "\t trigger will be the same as any other instance having the\n"
4253 "\t same name. The default format used to display a given field\n"
4254 "\t can be modified by appending any of the following modifiers\n"
4255 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004256 "\t .hex display a number as a hex value\n"
4257 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004258 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004259 "\t .execname display a common_pid as a program name\n"
4260 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004261 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004262 "\t The 'pause' parameter can be used to pause an existing hist\n"
4263 "\t trigger or to start a hist trigger but not log any events\n"
4264 "\t until told to do so. 'continue' can be used to start or\n"
4265 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004266 "\t The 'clear' parameter will clear the contents of a running\n"
4267 "\t hist trigger and leave its current paused/active state\n"
4268 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004269 "\t The enable_hist and disable_hist triggers can be used to\n"
4270 "\t have one event conditionally start and stop another event's\n"
4271 "\t already-attached hist trigger. The syntax is analagous to\n"
4272 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004273#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004274;
4275
4276static ssize_t
4277tracing_readme_read(struct file *filp, char __user *ubuf,
4278 size_t cnt, loff_t *ppos)
4279{
4280 return simple_read_from_buffer(ubuf, cnt, ppos,
4281 readme_msg, strlen(readme_msg));
4282}
4283
/* File operations for the read-only "README" help file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
4289
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004290static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004291{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004292 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004293
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004294 if (*pos || m->count)
4295 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004296
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004297 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004298
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004299 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4300 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004301 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004302 continue;
4303
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004304 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004305 }
4306
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004307 return NULL;
4308}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004309
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004310static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4311{
4312 void *v;
4313 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004314
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004315 preempt_disable();
4316 arch_spin_lock(&trace_cmdline_lock);
4317
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004318 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004319 while (l <= *pos) {
4320 v = saved_cmdlines_next(m, v, &l);
4321 if (!v)
4322 return NULL;
4323 }
4324
4325 return v;
4326}
4327
/*
 * seq_file ->stop: release the cmdline lock taken in
 * saved_cmdlines_start() and re-enable preemption.
 */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
4333
4334static int saved_cmdlines_show(struct seq_file *m, void *v)
4335{
4336 char buf[TASK_COMM_LEN];
4337 unsigned int *pid = v;
4338
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004339 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004340 seq_printf(m, "%d %s\n", *pid, buf);
4341 return 0;
4342}
4343
/* seq_file iterator operations for the "saved_cmdlines" file. */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
4350
4351static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4352{
4353 if (tracing_disabled)
4354 return -ENODEV;
4355
4356 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004357}
4358
/* File operations for "saved_cmdlines": standard seq_file plumbing. */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4365
4366static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004367tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4368 size_t cnt, loff_t *ppos)
4369{
4370 char buf[64];
4371 int r;
4372
4373 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004374 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004375 arch_spin_unlock(&trace_cmdline_lock);
4376
4377 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4378}
4379
/* Free a saved_cmdlines_buffer along with both arrays it owns. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
4386
4387static int tracing_resize_saved_cmdlines(unsigned int val)
4388{
4389 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4390
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004391 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004392 if (!s)
4393 return -ENOMEM;
4394
4395 if (allocate_cmdlines_buffer(val, s) < 0) {
4396 kfree(s);
4397 return -ENOMEM;
4398 }
4399
4400 arch_spin_lock(&trace_cmdline_lock);
4401 savedcmd_temp = savedcmd;
4402 savedcmd = s;
4403 arch_spin_unlock(&trace_cmdline_lock);
4404 free_saved_cmdlines_buffer(savedcmd_temp);
4405
4406 return 0;
4407}
4408
4409static ssize_t
4410tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4411 size_t cnt, loff_t *ppos)
4412{
4413 unsigned long val;
4414 int ret;
4415
4416 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4417 if (ret)
4418 return ret;
4419
4420 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4421 if (!val || val > PID_MAX_DEFAULT)
4422 return -EINVAL;
4423
4424 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4425 if (ret < 0)
4426 return ret;
4427
4428 *ppos += cnt;
4429
4430 return cnt;
4431}
4432
/*
 * File operations for "saved_cmdlines_size".
 * NOTE(review): no .llseek is set, unlike the neighboring fops —
 * confirm whether the default seek behavior is intended here.
 */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
4438
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004439#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4440static union trace_enum_map_item *
4441update_enum_map(union trace_enum_map_item *ptr)
4442{
4443 if (!ptr->map.enum_string) {
4444 if (ptr->tail.next) {
4445 ptr = ptr->tail.next;
4446 /* Set ptr to the next real item (skip head) */
4447 ptr++;
4448 } else
4449 return NULL;
4450 }
4451 return ptr;
4452}
4453
4454static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4455{
4456 union trace_enum_map_item *ptr = v;
4457
4458 /*
4459 * Paranoid! If ptr points to end, we don't want to increment past it.
4460 * This really should never happen.
4461 */
4462 ptr = update_enum_map(ptr);
4463 if (WARN_ON_ONCE(!ptr))
4464 return NULL;
4465
4466 ptr++;
4467
4468 (*pos)++;
4469
4470 ptr = update_enum_map(ptr);
4471
4472 return ptr;
4473}
4474
4475static void *enum_map_start(struct seq_file *m, loff_t *pos)
4476{
4477 union trace_enum_map_item *v;
4478 loff_t l = 0;
4479
4480 mutex_lock(&trace_enum_mutex);
4481
4482 v = trace_enum_maps;
4483 if (v)
4484 v++;
4485
4486 while (v && l < *pos) {
4487 v = enum_map_next(m, v, &l);
4488 }
4489
4490 return v;
4491}
4492
/* seq_file ->stop: release the mutex taken in enum_map_start(). */
static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}
4497
4498static int enum_map_show(struct seq_file *m, void *v)
4499{
4500 union trace_enum_map_item *ptr = v;
4501
4502 seq_printf(m, "%s %ld (%s)\n",
4503 ptr->map.enum_string, ptr->map.enum_value,
4504 ptr->map.system);
4505
4506 return 0;
4507}
4508
/* seq_file iterator operations for the "enum_map" file. */
static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};
4515
4516static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4517{
4518 if (tracing_disabled)
4519 return -ENODEV;
4520
4521 return seq_open(filp, &tracing_enum_map_seq_ops);
4522}
4523
/* File operations for "enum_map": standard seq_file plumbing. */
static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4530
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/*
	 * Return tail of array given the head: head.length map entries
	 * sit between the head item and the tail item.
	 */
	return ptr + ptr->head.length + 1;
}
4537
4538static void
4539trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4540 int len)
4541{
4542 struct trace_enum_map **stop;
4543 struct trace_enum_map **map;
4544 union trace_enum_map_item *map_array;
4545 union trace_enum_map_item *ptr;
4546
4547 stop = start + len;
4548
4549 /*
4550 * The trace_enum_maps contains the map plus a head and tail item,
4551 * where the head holds the module and length of array, and the
4552 * tail holds a pointer to the next list.
4553 */
4554 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4555 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004556 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004557 return;
4558 }
4559
4560 mutex_lock(&trace_enum_mutex);
4561
4562 if (!trace_enum_maps)
4563 trace_enum_maps = map_array;
4564 else {
4565 ptr = trace_enum_maps;
4566 for (;;) {
4567 ptr = trace_enum_jmp_to_tail(ptr);
4568 if (!ptr->tail.next)
4569 break;
4570 ptr = ptr->tail.next;
4571
4572 }
4573 ptr->tail.next = map_array;
4574 }
4575 map_array->head.mod = mod;
4576 map_array->head.length = len;
4577 map_array++;
4578
4579 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4580 map_array->map = **map;
4581 map_array++;
4582 }
4583 memset(map_array, 0, sizeof(*map_array));
4584
4585 mutex_unlock(&trace_enum_mutex);
4586}
4587
/* Create the read-only "enum_map" file under the given tracefs dentry. */
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}
4593
#else /* CONFIG_TRACE_ENUM_MAP_FILE */
/* No "enum_map" file support configured: these stubs compile away. */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4599
/*
 * Register @len enum maps from @mod: update the event system's enum
 * values, then (when configured) expose them via the "enum_map" file.
 */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	if (len <= 0)
		return;

	trace_event_enum_update(start, len);

	trace_insert_enum_map_file(mod, start, len);
}
4614
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004615static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004616tracing_set_trace_read(struct file *filp, char __user *ubuf,
4617 size_t cnt, loff_t *ppos)
4618{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004619 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004620 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004621 int r;
4622
4623 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004624 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004625 mutex_unlock(&trace_types_lock);
4626
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004627 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004628}
4629
/*
 * Initialize tracer @t on trace array @tr: reset the per-cpu ring
 * buffers first so the tracer starts clean, then call its init().
 */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
4635
/* Record @val as the entry count for every tracing CPU of @buf. */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
4643
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's per-cpu buffers to match @size_buf's entry counts */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* Mirror every CPU's entry count; stop at the first failure. */
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		/* Single CPU: resize it and record the count only on success. */
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004671
/*
 * Resize @tr's trace buffer (and, for the global array when the current
 * tracer uses it, the max/snapshot buffer) to @size on @cpu, where @cpu
 * may be RING_BUFFER_ALL_CPUS.  Returns 0 on success or a negative
 * error.  Callers in this file invoke it with trace_types_lock held.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The max buffer only needs resizing for the global array when
	 * the current tracer actually uses it. */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* Try to roll the main buffer back to its previous size. */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4737
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004738static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4739 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004740{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004741 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004742
4743 mutex_lock(&trace_types_lock);
4744
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004745 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4746 /* make sure, this cpu is enabled in the mask */
4747 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4748 ret = -EINVAL;
4749 goto out;
4750 }
4751 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004752
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004753 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004754 if (ret < 0)
4755 ret = -ENOMEM;
4756
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004757out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004758 mutex_unlock(&trace_types_lock);
4759
4760 return ret;
4761}
4762
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004763
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 *
 * Return: 0 on success (or if the buffers were already expanded),
 * negative error code if the resize fails.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
4786
Steven Rostedt577b7852009-02-26 23:43:05 -05004787struct trace_option_dentry;
4788
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004789static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004790create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004791
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004792/*
4793 * Used to clear out the tracer before deletion of an instance.
4794 * Must have trace_types_lock held.
4795 */
4796static void tracing_set_nop(struct trace_array *tr)
4797{
4798 if (tr->current_trace == &nop_trace)
4799 return;
4800
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004801 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004802
4803 if (tr->current_trace->reset)
4804 tr->current_trace->reset(tr);
4805
4806 tr->current_trace = &nop_trace;
4807}
4808
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004809static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004810{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004811 /* Only enable if the directory has been created already. */
4812 if (!tr->dir)
4813 return;
4814
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004815 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004816}
4817
/*
 * Switch the current tracer of @tr to the registered tracer named @buf.
 *
 * Expands the ring buffer on first use, refuses the switch while pipe
 * readers hold a reference, tears down the old tracer, handles the
 * max-latency snapshot buffer transitions, and initializes the new
 * tracer.  Returns 0 on success or a negative errno.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	/* Expand the ring buffer to its full size on first tracer use. */
	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	/* Look up the requested tracer by name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	/* Switching to the tracer already in use is a no-op. */
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The new tracer may need a snapshot buffer for max latency. */
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4907
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004908static ssize_t
4909tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4910 size_t cnt, loff_t *ppos)
4911{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004912 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004913 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004914 int i;
4915 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004916 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004917
Steven Rostedt60063a62008-10-28 10:44:24 -04004918 ret = cnt;
4919
Li Zefanee6c2c12009-09-18 14:06:47 +08004920 if (cnt > MAX_TRACER_SIZE)
4921 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004922
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004923 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004924 return -EFAULT;
4925
4926 buf[cnt] = 0;
4927
4928 /* strip ending whitespace. */
4929 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4930 buf[i] = 0;
4931
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004932 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004933 if (err)
4934 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004935
Jiri Olsacf8517c2009-10-23 19:36:16 -04004936 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004937
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004938 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004939}
4940
4941static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004942tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4943 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004944{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004945 char buf[64];
4946 int r;
4947
Steven Rostedtcffae432008-05-12 21:21:00 +02004948 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004949 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004950 if (r > sizeof(buf))
4951 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004952 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004953}
4954
4955static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004956tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4957 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004958{
Hannes Eder5e398412009-02-10 19:44:34 +01004959 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004960 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004961
Peter Huewe22fe9b52011-06-07 21:58:27 +02004962 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4963 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004964 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004965
4966 *ptr = val * 1000;
4967
4968 return cnt;
4969}
4970
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004971static ssize_t
4972tracing_thresh_read(struct file *filp, char __user *ubuf,
4973 size_t cnt, loff_t *ppos)
4974{
4975 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4976}
4977
4978static ssize_t
4979tracing_thresh_write(struct file *filp, const char __user *ubuf,
4980 size_t cnt, loff_t *ppos)
4981{
4982 struct trace_array *tr = filp->private_data;
4983 int ret;
4984
4985 mutex_lock(&trace_types_lock);
4986 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4987 if (ret < 0)
4988 goto out;
4989
4990 if (tr->current_trace->update_thresh) {
4991 ret = tr->current_trace->update_thresh(tr);
4992 if (ret < 0)
4993 goto out;
4994 }
4995
4996 ret = cnt;
4997out:
4998 mutex_unlock(&trace_types_lock);
4999
5000 return ret;
5001}
5002
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005003#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005004
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005005static ssize_t
5006tracing_max_lat_read(struct file *filp, char __user *ubuf,
5007 size_t cnt, loff_t *ppos)
5008{
5009 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5010}
5011
/*
 * Write handler for per-tracer max latency files; the value written in
 * usecs is stored (in ns) at filp->private_data.
 */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
5018
Chen Gange428abb2015-11-10 05:15:15 +08005019#endif
5020
Steven Rostedtb3806b42008-05-12 21:20:46 +02005021static int tracing_open_pipe(struct inode *inode, struct file *filp)
5022{
Oleg Nesterov15544202013-07-23 17:25:57 +02005023 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005024 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005025 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005026
5027 if (tracing_disabled)
5028 return -ENODEV;
5029
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005030 if (trace_array_get(tr) < 0)
5031 return -ENODEV;
5032
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005033 mutex_lock(&trace_types_lock);
5034
Steven Rostedtb3806b42008-05-12 21:20:46 +02005035 /* create a buffer to store the information to pass to userspace */
5036 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005037 if (!iter) {
5038 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005039 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005040 goto out;
5041 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005042
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005043 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005044 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005045
5046 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5047 ret = -ENOMEM;
5048 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305049 }
5050
Steven Rostedta3097202008-11-07 22:36:02 -05005051 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305052 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005053
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005054 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005055 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5056
David Sharp8be07092012-11-13 12:18:22 -08005057 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005058 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005059 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5060
Oleg Nesterov15544202013-07-23 17:25:57 +02005061 iter->tr = tr;
5062 iter->trace_buffer = &tr->trace_buffer;
5063 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005064 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005065 filp->private_data = iter;
5066
Steven Rostedt107bad82008-05-12 21:21:01 +02005067 if (iter->trace->pipe_open)
5068 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005069
Arnd Bergmannb4447862010-07-07 23:40:11 +02005070 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005071
5072 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005073out:
5074 mutex_unlock(&trace_types_lock);
5075 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005076
5077fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005078 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005079 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005080 mutex_unlock(&trace_types_lock);
5081 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005082}
5083
/*
 * Release a trace_pipe file: undo everything tracing_open_pipe() set
 * up, dropping the tracer and trace_array references.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Allow the current tracer to be switched again. */
	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	/* Drop the reference taken by trace_array_get() at open time. */
	trace_array_put(tr);

	return 0;
}
5106
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005107static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005108trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005109{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005110 struct trace_array *tr = iter->tr;
5111
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005112 /* Iterators are static, they should be filled or empty */
5113 if (trace_buffer_iter(iter, iter->cpu_file))
5114 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005115
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005116 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005117 /*
5118 * Always select as readable when in blocking mode
5119 */
5120 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005121 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005122 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005123 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005124}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005125
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005126static unsigned int
5127tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5128{
5129 struct trace_iterator *iter = filp->private_data;
5130
5131 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005132}
5133
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005134/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005135static int tracing_wait_pipe(struct file *filp)
5136{
5137 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005138 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005139
5140 while (trace_empty(iter)) {
5141
5142 if ((filp->f_flags & O_NONBLOCK)) {
5143 return -EAGAIN;
5144 }
5145
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005146 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005147 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005148 * We still block if tracing is disabled, but we have never
5149 * read anything. This allows a user to cat this file, and
5150 * then enable tracing. But after we have read something,
5151 * we give an EOF when tracing is again disabled.
5152 *
5153 * iter->pos will be 0 if we haven't read anything.
5154 */
Tahsin Erdogan97d402e2017-09-17 03:23:48 -07005155 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005156 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005157
5158 mutex_unlock(&iter->mutex);
5159
Rabin Vincente30f53a2014-11-10 19:46:34 +01005160 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005161
5162 mutex_lock(&iter->mutex);
5163
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005164 if (ret)
5165 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005166 }
5167
5168 return 1;
5169}
5170
Steven Rostedtb3806b42008-05-12 21:20:46 +02005171/*
5172 * Consumer reader.
5173 */
5174static ssize_t
5175tracing_read_pipe(struct file *filp, char __user *ubuf,
5176 size_t cnt, loff_t *ppos)
5177{
5178 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005179 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005180
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005181 /*
5182 * Avoid more than one consumer on a single file descriptor
5183 * This is just a matter of traces coherency, the ring buffer itself
5184 * is protected.
5185 */
5186 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005187
5188 /* return any leftover data */
5189 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5190 if (sret != -EBUSY)
5191 goto out;
5192
5193 trace_seq_init(&iter->seq);
5194
Steven Rostedt107bad82008-05-12 21:21:01 +02005195 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005196 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5197 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005198 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005199 }
5200
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005201waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005202 sret = tracing_wait_pipe(filp);
5203 if (sret <= 0)
5204 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005205
5206 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005207 if (trace_empty(iter)) {
5208 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005209 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005210 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005211
5212 if (cnt >= PAGE_SIZE)
5213 cnt = PAGE_SIZE - 1;
5214
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005215 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005216 memset(&iter->seq, 0,
5217 sizeof(struct trace_iterator) -
5218 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005219 cpumask_clear(iter->started);
Petr Mladekc24de622019-10-11 16:21:34 +02005220 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005221 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005222
Lai Jiangshan4f535962009-05-18 19:35:34 +08005223 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005224 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005225 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005226 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005227 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005228
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005229 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005230 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005231 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005232 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005233 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005234 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005235 if (ret != TRACE_TYPE_NO_CONSUME)
5236 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005237
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005238 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005239 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005240
5241 /*
5242 * Setting the full flag means we reached the trace_seq buffer
5243 * size and we should leave by partial output condition above.
5244 * One of the trace_seq_* functions is not used properly.
5245 */
5246 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5247 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005248 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005249 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005250 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005251
Steven Rostedtb3806b42008-05-12 21:20:46 +02005252 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005253 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005254 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005255 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005256
5257 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005258 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005259 * entries, go back to wait for more entries.
5260 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005261 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005262 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005263
Steven Rostedt107bad82008-05-12 21:21:01 +02005264out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005265 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005266
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005267 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005268}
5269
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005270static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5271 unsigned int idx)
5272{
5273 __free_page(spd->pages[idx]);
5274}
5275
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005276static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005277 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005278 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005279 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005280 .steal = generic_pipe_buf_steal,
5281 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005282};
5283
Steven Rostedt34cd4992009-02-09 12:06:29 -05005284static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005285tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005286{
5287 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005288 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005289 int ret;
5290
5291 /* Seq buffer is page-sized, exactly what we need. */
5292 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005293 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005294 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005295
5296 if (trace_seq_has_overflowed(&iter->seq)) {
5297 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005298 break;
5299 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005300
5301 /*
5302 * This should not be hit, because it should only
5303 * be set if the iter->seq overflowed. But check it
5304 * anyway to be safe.
5305 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005306 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005307 iter->seq.seq.len = save_len;
5308 break;
5309 }
5310
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005311 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005312 if (rem < count) {
5313 rem = 0;
5314 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005315 break;
5316 }
5317
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005318 if (ret != TRACE_TYPE_NO_CONSUME)
5319 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005320 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005321 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005322 rem = 0;
5323 iter->ent = NULL;
5324 break;
5325 }
5326 }
5327
5328 return rem;
5329}
5330
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005331static ssize_t tracing_splice_read_pipe(struct file *filp,
5332 loff_t *ppos,
5333 struct pipe_inode_info *pipe,
5334 size_t len,
5335 unsigned int flags)
5336{
Jens Axboe35f3d142010-05-20 10:43:18 +02005337 struct page *pages_def[PIPE_DEF_BUFFERS];
5338 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005339 struct trace_iterator *iter = filp->private_data;
5340 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005341 .pages = pages_def,
5342 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005343 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005344 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005345 .flags = flags,
5346 .ops = &tracing_pipe_buf_ops,
5347 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005348 };
5349 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005350 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005351 unsigned int i;
5352
Jens Axboe35f3d142010-05-20 10:43:18 +02005353 if (splice_grow_spd(pipe, &spd))
5354 return -ENOMEM;
5355
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005356 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005357
5358 if (iter->trace->splice_read) {
5359 ret = iter->trace->splice_read(iter, filp,
5360 ppos, pipe, len, flags);
5361 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005362 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005363 }
5364
5365 ret = tracing_wait_pipe(filp);
5366 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005367 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005368
Jason Wessel955b61e2010-08-05 09:22:23 -05005369 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005370 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005371 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005372 }
5373
Lai Jiangshan4f535962009-05-18 19:35:34 +08005374 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005375 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005376
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005377 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005378 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005379 spd.pages[i] = alloc_page(GFP_KERNEL);
5380 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005381 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005382
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005383 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005384
5385 /* Copy the data into the page, so we can start over. */
5386 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005387 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005388 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005389 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005390 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005391 break;
5392 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005393 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005394 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005395
Steven Rostedtf9520752009-03-02 14:04:40 -05005396 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005397 }
5398
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005399 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005400 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005401 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005402
5403 spd.nr_pages = i;
5404
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005405 if (i)
5406 ret = splice_to_pipe(pipe, &spd);
5407 else
5408 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005409out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005410 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005411 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005412
Steven Rostedt34cd4992009-02-09 12:06:29 -05005413out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005414 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005415 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005416}
5417
Steven Rostedta98a3c32008-05-12 21:20:59 +02005418static ssize_t
5419tracing_entries_read(struct file *filp, char __user *ubuf,
5420 size_t cnt, loff_t *ppos)
5421{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005422 struct inode *inode = file_inode(filp);
5423 struct trace_array *tr = inode->i_private;
5424 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005425 char buf[64];
5426 int r = 0;
5427 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005428
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005429 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005430
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005431 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005432 int cpu, buf_size_same;
5433 unsigned long size;
5434
5435 size = 0;
5436 buf_size_same = 1;
5437 /* check if all cpu sizes are same */
5438 for_each_tracing_cpu(cpu) {
5439 /* fill in the size from first enabled cpu */
5440 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005441 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5442 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005443 buf_size_same = 0;
5444 break;
5445 }
5446 }
5447
5448 if (buf_size_same) {
5449 if (!ring_buffer_expanded)
5450 r = sprintf(buf, "%lu (expanded: %lu)\n",
5451 size >> 10,
5452 trace_buf_size >> 10);
5453 else
5454 r = sprintf(buf, "%lu\n", size >> 10);
5455 } else
5456 r = sprintf(buf, "X\n");
5457 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005458 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005459
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005460 mutex_unlock(&trace_types_lock);
5461
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005462 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5463 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005464}
5465
5466static ssize_t
5467tracing_entries_write(struct file *filp, const char __user *ubuf,
5468 size_t cnt, loff_t *ppos)
5469{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005470 struct inode *inode = file_inode(filp);
5471 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005472 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005473 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005474
Peter Huewe22fe9b52011-06-07 21:58:27 +02005475 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5476 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005477 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005478
5479 /* must have at least 1 entry */
5480 if (!val)
5481 return -EINVAL;
5482
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005483 /* value is in KB */
5484 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005485 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005486 if (ret < 0)
5487 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005488
Jiri Olsacf8517c2009-10-23 19:36:16 -04005489 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005490
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005491 return cnt;
5492}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005493
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005494static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005495tracing_total_entries_read(struct file *filp, char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497{
5498 struct trace_array *tr = filp->private_data;
5499 char buf[64];
5500 int r, cpu;
5501 unsigned long size = 0, expanded_size = 0;
5502
5503 mutex_lock(&trace_types_lock);
5504 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005505 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005506 if (!ring_buffer_expanded)
5507 expanded_size += trace_buf_size >> 10;
5508 }
5509 if (ring_buffer_expanded)
5510 r = sprintf(buf, "%lu\n", size);
5511 else
5512 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5513 mutex_unlock(&trace_types_lock);
5514
5515 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5516}
5517
5518static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005519tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5520 size_t cnt, loff_t *ppos)
5521{
5522 /*
5523 * There is no need to read what the user has written, this function
5524 * is just to make sure that there is no error when "echo" is used
5525 */
5526
5527 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005528
5529 return cnt;
5530}
5531
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005532static int
5533tracing_free_buffer_release(struct inode *inode, struct file *filp)
5534{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005535 struct trace_array *tr = inode->i_private;
5536
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005537 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005538 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005539 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005540 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005541 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005542
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005543 trace_array_put(tr);
5544
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005545 return 0;
5546}
5547
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005548static ssize_t
5549tracing_mark_write(struct file *filp, const char __user *ubuf,
5550 size_t cnt, loff_t *fpos)
5551{
Steven Rostedtd696b582011-09-22 11:50:27 -04005552 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005553 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005554 struct ring_buffer_event *event;
5555 struct ring_buffer *buffer;
5556 struct print_entry *entry;
5557 unsigned long irq_flags;
5558 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005559 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005560 int nr_pages = 1;
5561 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005562 int offset;
5563 int size;
5564 int len;
5565 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005566 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005567
Steven Rostedtc76f0692008-11-07 22:36:02 -05005568 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005569 return -EINVAL;
5570
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005571 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005572 return -EINVAL;
5573
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005574 if (cnt > TRACE_BUF_SIZE)
5575 cnt = TRACE_BUF_SIZE;
5576
Steven Rostedtd696b582011-09-22 11:50:27 -04005577 /*
5578 * Userspace is injecting traces into the kernel trace buffer.
5579 * We want to be as non intrusive as possible.
5580 * To do so, we do not want to allocate any special buffers
5581 * or take any locks, but instead write the userspace data
5582 * straight into the ring buffer.
5583 *
5584 * First we need to pin the userspace buffer into memory,
5585 * which, most likely it is, because it just referenced it.
5586 * But there's no guarantee that it is. By using get_user_pages_fast()
5587 * and kmap_atomic/kunmap_atomic() we can get access to the
5588 * pages directly. We then write the data directly into the
5589 * ring buffer.
5590 */
5591 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005592
Steven Rostedtd696b582011-09-22 11:50:27 -04005593 /* check if we cross pages */
5594 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5595 nr_pages = 2;
5596
5597 offset = addr & (PAGE_SIZE - 1);
5598 addr &= PAGE_MASK;
5599
5600 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5601 if (ret < nr_pages) {
5602 while (--ret >= 0)
5603 put_page(pages[ret]);
5604 written = -EFAULT;
5605 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005606 }
5607
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005608 for (i = 0; i < nr_pages; i++)
5609 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005610
5611 local_save_flags(irq_flags);
5612 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005613 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005614 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5615 irq_flags, preempt_count());
5616 if (!event) {
5617 /* Ring buffer disabled, return as if not open for write */
5618 written = -EBADF;
5619 goto out_unlock;
5620 }
5621
5622 entry = ring_buffer_event_data(event);
5623 entry->ip = _THIS_IP_;
5624
5625 if (nr_pages == 2) {
5626 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005627 memcpy(&entry->buf, map_page[0] + offset, len);
5628 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005629 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005630 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005631
5632 if (entry->buf[cnt - 1] != '\n') {
5633 entry->buf[cnt] = '\n';
5634 entry->buf[cnt + 1] = '\0';
5635 } else
5636 entry->buf[cnt] = '\0';
5637
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005638 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005639
5640 written = cnt;
5641
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005642 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005643
Steven Rostedtd696b582011-09-22 11:50:27 -04005644 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005645 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005646 kunmap_atomic(map_page[i]);
5647 put_page(pages[i]);
5648 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005649 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005650 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005651}
5652
Li Zefan13f16d22009-12-08 11:16:11 +08005653static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005654{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005655 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005656 int i;
5657
5658 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005659 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005660 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005661 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5662 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005663 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005664
Li Zefan13f16d22009-12-08 11:16:11 +08005665 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005666}
5667
Steven Rostedte1e232c2014-02-10 23:38:46 -05005668static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005669{
Zhaolei5079f322009-08-25 16:12:56 +08005670 int i;
5671
Zhaolei5079f322009-08-25 16:12:56 +08005672 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5673 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5674 break;
5675 }
5676 if (i == ARRAY_SIZE(trace_clocks))
5677 return -EINVAL;
5678
Zhaolei5079f322009-08-25 16:12:56 +08005679 mutex_lock(&trace_types_lock);
5680
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005681 tr->clock_id = i;
5682
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005683 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005684
David Sharp60303ed2012-10-11 16:27:52 -07005685 /*
5686 * New clock may not be consistent with the previous clock.
5687 * Reset the buffer so that it doesn't have incomparable timestamps.
5688 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005689 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005690
5691#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liucf0523362017-09-05 16:57:19 -05005692 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005693 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005694 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005695#endif
David Sharp60303ed2012-10-11 16:27:52 -07005696
Zhaolei5079f322009-08-25 16:12:56 +08005697 mutex_unlock(&trace_types_lock);
5698
Steven Rostedte1e232c2014-02-10 23:38:46 -05005699 return 0;
5700}
5701
5702static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5703 size_t cnt, loff_t *fpos)
5704{
5705 struct seq_file *m = filp->private_data;
5706 struct trace_array *tr = m->private;
5707 char buf[64];
5708 const char *clockstr;
5709 int ret;
5710
5711 if (cnt >= sizeof(buf))
5712 return -EINVAL;
5713
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005714 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05005715 return -EFAULT;
5716
5717 buf[cnt] = 0;
5718
5719 clockstr = strstrip(buf);
5720
5721 ret = tracing_set_clock(tr, clockstr);
5722 if (ret)
5723 return ret;
5724
Zhaolei5079f322009-08-25 16:12:56 +08005725 *fpos += cnt;
5726
5727 return cnt;
5728}
5729
Li Zefan13f16d22009-12-08 11:16:11 +08005730static int tracing_clock_open(struct inode *inode, struct file *file)
5731{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005732 struct trace_array *tr = inode->i_private;
5733 int ret;
5734
Li Zefan13f16d22009-12-08 11:16:11 +08005735 if (tracing_disabled)
5736 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005737
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005738 if (trace_array_get(tr))
5739 return -ENODEV;
5740
5741 ret = single_open(file, tracing_clock_show, inode->i_private);
5742 if (ret < 0)
5743 trace_array_put(tr);
5744
5745 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005746}
5747
/* Per-open-file state for the raw (binary) per-cpu buffer files. */
struct ftrace_buffer_info {
	struct trace_iterator	iter;	/* iterator over the ring buffer */
	void			*spare;	/* spare page for ring_buffer_read_page() */
	unsigned int		read;	/* bytes of the spare page already consumed */
};
5753
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005754#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the snapshot file.
 *
 * Readers get a full iterator via __tracing_open() (with the snapshot
 * flag set); write-only opens get a stub seq_file that merely carries
 * a minimal iterator pointing at the max (snapshot) buffer.
 * Takes a reference on the trace array that is dropped on failure
 * (and otherwise at release time).
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		/* Point the stub iterator at the snapshot (max) buffer. */
		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5794
/*
 * Write handler for the snapshot file.  The value written selects the
 * action:
 *   0 - free the snapshot buffer (all-cpus file only)
 *   1 - allocate/size the snapshot buffer if needed, then swap it with
 *       the live buffer (per-cpu swap only if the ring buffer allows it)
 *   anything else - clear the snapshot buffer contents
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* A tracer that uses the max buffer itself owns the snapshot. */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->trace_buffer, iter->cpu_file);
		else
			ret = alloc_snapshot(tr);

		if (ret < 0)
			break;

		/* IRQs off so the swap is not preempted on this CPU. */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* Any other value just clears the snapshot contents. */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005872
5873static int tracing_snapshot_release(struct inode *inode, struct file *file)
5874{
5875 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005876 int ret;
5877
5878 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005879
5880 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005881 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005882
5883 /* If write only, the seq_file is just a stub */
5884 if (m)
5885 kfree(m->private);
5886 kfree(m);
5887
5888 return 0;
5889}
5890
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005891static int tracing_buffers_open(struct inode *inode, struct file *filp);
5892static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5893 size_t count, loff_t *ppos);
5894static int tracing_buffers_release(struct inode *inode, struct file *file);
5895static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5896 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5897
5898static int snapshot_raw_open(struct inode *inode, struct file *filp)
5899{
5900 struct ftrace_buffer_info *info;
5901 int ret;
5902
5903 ret = tracing_buffers_open(inode, filp);
5904 if (ret < 0)
5905 return ret;
5906
5907 info = filp->private_data;
5908
5909 if (info->iter.trace->use_max_tr) {
5910 tracing_buffers_release(inode, filp);
5911 return -EBUSY;
5912 }
5913
5914 info->iter.snapshot = true;
5915 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5916
5917 return ret;
5918}
5919
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005920#endif /* CONFIG_TRACER_SNAPSHOT */
5921
5922
/* File operations for the tracing threshold control file. */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
/* Max-latency file; only built when a tracer that records one exists. */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005938
/* Select/report the current tracer by name. */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

/* Consuming, blocking (pollable) reader of the trace stream. */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

/* Per-cpu (or global) ring buffer size, read/written in KB. */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5962
/* Combined size of all per-cpu ring buffers, in KB (read only). */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Shrinks the ring buffer to zero when the file is released. */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

/* Inject user-supplied strings into the trace stream. */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* List and select the clock used to timestamp trace events. */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};
5990
#ifdef CONFIG_TRACER_SNAPSHOT
/* Control and read the snapshot (max) buffer. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

/* Raw (binary) per-cpu reader of the snapshot buffer. */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
6009
/*
 * Open handler for the raw per-cpu buffer files.  Allocates the
 * per-open ftrace_buffer_info, pins the trace array, and bumps the
 * current tracer's ref count under trace_types_lock (presumably to
 * keep the tracer from going away while the buffer is open — confirm
 * against where ->ref is checked).
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
6050
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006051static unsigned int
6052tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6053{
6054 struct ftrace_buffer_info *info = filp->private_data;
6055 struct trace_iterator *iter = &info->iter;
6056
6057 return trace_poll(iter, filp, poll_table);
6058}
6059
/*
 * Read handler for the raw per-cpu buffer files.  Pulls a full page
 * out of the ring buffer into the spare page, then hands it out to
 * userspace across one or more reads (info->read tracks how much of
 * the spare page has already been consumed).
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot buffer is off limits while the tracer owns it. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Lazily allocate the spare page on first read. */
	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		/* Nothing available: either fail fast or block and retry. */
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	/* Hand out whatever is left of the spare page, capped at count. */
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	/* Account only for the bytes actually copied. */
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
6126
/*
 * Release handler for the raw per-cpu buffer files.  Undoes
 * tracing_buffers_open(): drops the tracer ref count and the trace
 * array reference, and frees the spare page and the per-open state.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
6146
/* Reference-counted handle to one ring-buffer page spliced into a pipe. */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* buffer the page came from */
	void			*page;		/* the read page itself */
	int			ref;		/* pipe buffers sharing this page */
};
6152
6153static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6154 struct pipe_buffer *buf)
6155{
6156 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6157
6158 if (--ref->ref)
6159 return;
6160
6161 ring_buffer_free_read_page(ref->buffer, ref->page);
6162 kfree(ref);
6163 buf->private = 0;
6164}
6165
Matthew Wilcox95570902019-04-05 14:02:10 -07006166static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006167 struct pipe_buffer *buf)
6168{
6169 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6170
Matthew Wilcox95570902019-04-05 14:02:10 -07006171 if (ref->ref > INT_MAX/2)
6172 return false;
6173
Steven Rostedt2cadf912008-12-01 22:20:19 -05006174 ref->ref++;
Matthew Wilcox95570902019-04-05 14:02:10 -07006175 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006176}
6177
/*
 * Pipe buffer operations for a ring-buffer page.  ->get and ->release
 * manage the buffer_ref count; the page is recycled when it drops to
 * zero.
 */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
6186
6187/*
6188 * Callback from splice_to_pipe(), if we need to release some pages
6189 * at the end of the spd in case we error'ed out in filling the pipe.
6190 */
6191static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6192{
6193 struct buffer_ref *ref =
6194 (struct buffer_ref *)spd->partial[i].private;
6195
6196 if (--ref->ref)
6197 return;
6198
6199 ring_buffer_free_read_page(ref->buffer, ref->page);
6200 kfree(ref);
6201 spd->partial[i].private = 0;
6202}
6203
/*
 * splice() handler for per-cpu trace_pipe_raw files: moves whole
 * ring-buffer pages into a pipe without copying their contents.
 * Both *ppos and len must be page aligned (len may be rounded down
 * when it is more than one page).  Blocks until data is available
 * unless the file or the splice call is non-blocking.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* reading the snapshot while the tracer swaps into it would race */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* only page-aligned offsets make sense for full-page transfers */
	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	/* fill the spd one page at a time while data and pipe slots remain */
	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		/* the buffer_ref rides along in the partial page's private */
		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		/* a mid-loop allocation failure takes precedence over EAGAIN */
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		/* wait for a full page of data, then retry the fill loop */
		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
6310
/* File operations for the per-cpu trace_pipe_raw files. */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
6319
Steven Rostedtc8d77182009-04-29 18:03:45 -04006320static ssize_t
6321tracing_stats_read(struct file *filp, char __user *ubuf,
6322 size_t count, loff_t *ppos)
6323{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006324 struct inode *inode = file_inode(filp);
6325 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006326 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006327 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006328 struct trace_seq *s;
6329 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006330 unsigned long long t;
6331 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006332
Li Zefane4f2d102009-06-15 10:57:28 +08006333 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006334 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006335 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006336
6337 trace_seq_init(s);
6338
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006339 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006340 trace_seq_printf(s, "entries: %ld\n", cnt);
6341
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006342 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006343 trace_seq_printf(s, "overrun: %ld\n", cnt);
6344
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006345 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006346 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6347
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006348 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006349 trace_seq_printf(s, "bytes: %ld\n", cnt);
6350
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006351 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006352 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006353 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006354 usec_rem = do_div(t, USEC_PER_SEC);
6355 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6356 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006357
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006358 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006359 usec_rem = do_div(t, USEC_PER_SEC);
6360 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6361 } else {
6362 /* counter or tsc mode for trace_clock */
6363 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006364 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006365
6366 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006367 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006368 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006369
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006370 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006371 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006373 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006374 trace_seq_printf(s, "read events: %ld\n", cnt);
6375
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006376 count = simple_read_from_buffer(ubuf, count, ppos,
6377 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006378
6379 kfree(s);
6380
6381 return count;
6382}
6383
/* File operations for the per-cpu "stats" files. */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
6390
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006391#ifdef CONFIG_DYNAMIC_FTRACE
6392
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006393int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006394{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006395 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006396}
6397
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006398static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006399tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006400 size_t cnt, loff_t *ppos)
6401{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006402 static char ftrace_dyn_info_buffer[1024];
6403 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006404 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006405 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006406 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006407 int r;
6408
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006409 mutex_lock(&dyn_info_mutex);
6410 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006411
Steven Rostedta26a2a22008-10-31 00:03:22 -04006412 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006413 buf[r++] = '\n';
6414
6415 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6416
6417 mutex_unlock(&dyn_info_mutex);
6418
6419 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006420}
6421
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006422static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006423 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006424 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006425 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006426};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006427#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006428
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006429#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Function probe callback: take a snapshot every time the probe hits. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006435
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006436static void
6437ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6438{
6439 unsigned long *count = (long *)data;
6440
6441 if (!*count)
6442 return;
6443
6444 if (*count != -1)
6445 (*count)--;
6446
6447 tracing_snapshot();
6448}
6449
/*
 * Show one registered snapshot probe, e.g.
 * "func:snapshot:count=5" or "func:snapshot:unlimited".
 * The remaining trigger count is carried in the probe data pointer.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long remaining = (long)data;

	seq_printf(m, "%ps:", (void *)ip);
	seq_puts(m, "snapshot");

	if (remaining != -1)
		seq_printf(m, ":count=%ld\n", remaining);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
6467
/* Probe ops for "func:snapshot" with no count (fires on every hit). */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
6472
/* Probe ops for "func:snapshot:count" (fires a limited number of times). */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
6477
/*
 * Parser for the set_ftrace_filter "snapshot" command.
 * Syntax: <func>:snapshot[:count]; a leading '!' unregisters the probe.
 * Ensures the snapshot buffer exists, then registers a function probe
 * that snapshots the trace when <func> is hit.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;	/* default: unlimited triggers */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* an empty count string falls back to the unlimited default */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	/* make sure the snapshot buffer is allocated before arming */
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
6524
/* The "snapshot" command usable from set_ftrace_filter. */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
6529
Tom Zanussi38de93a2013-10-24 08:34:18 -05006530static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006531{
6532 return register_ftrace_command(&ftrace_snapshot_cmd);
6533}
6534#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006535static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006536#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006537
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006538static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006539{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006540 if (WARN_ON(!tr->dir))
6541 return ERR_PTR(-ENODEV);
6542
6543 /* Top directory uses NULL as the parent */
6544 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6545 return NULL;
6546
6547 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006548 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006549}
6550
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006551static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6552{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006553 struct dentry *d_tracer;
6554
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006555 if (tr->percpu_dir)
6556 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006557
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006558 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006559 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006560 return NULL;
6561
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006562 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006563
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006564 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006565 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006566
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006567 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006568}
6569
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006570static struct dentry *
6571trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6572 void *data, long cpu, const struct file_operations *fops)
6573{
6574 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6575
6576 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006577 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006578 return ret;
6579}
6580
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006581static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006582tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006583{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006584 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006585 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006586 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006587
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006588 if (!d_percpu)
6589 return;
6590
Steven Rostedtdd49a382010-10-20 21:51:26 -04006591 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006592 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006593 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006594 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006595 return;
6596 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006597
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006598 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006599 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006600 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006601
6602 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006603 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006604 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006605
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006606 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006607 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006608
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006609 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006610 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006611
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006612 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006613 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006614
6615#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006616 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006617 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006618
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006619 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006620 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006621#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006622}
6623
Steven Rostedt60a11772008-05-12 21:20:44 +02006624#ifdef CONFIG_FTRACE_SELFTEST
6625/* Let selftest have access to static functions in this file */
6626#include "trace_selftest.c"
6627#endif
6628
Steven Rostedt577b7852009-02-26 23:43:05 -05006629static ssize_t
6630trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6631 loff_t *ppos)
6632{
6633 struct trace_option_dentry *topt = filp->private_data;
6634 char *buf;
6635
6636 if (topt->flags->val & topt->opt->bit)
6637 buf = "1\n";
6638 else
6639 buf = "0\n";
6640
6641 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6642}
6643
/*
 * Write handler for a tracer-specific option file: accepts "0" or "1"
 * and flips the option's bit through the tracer's set_flag callback
 * (via __set_tracer_option) under trace_types_lock.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* only call into the tracer when the bit actually changes */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
6672
6673
/* File operations for tracer-specific option files under options/. */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
6680
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006681/*
6682 * In order to pass in both the trace_array descriptor as well as the index
6683 * to the flag that the trace option file represents, the trace_array
6684 * has a character array of trace_flags_index[], which holds the index
6685 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6686 * The address of this character array is passed to the flag option file
6687 * read/write callbacks.
6688 *
6689 * In order to extract both the index and the trace_array descriptor,
6690 * get_tr_index() uses the following algorithm.
6691 *
6692 * idx = *ptr;
6693 *
6694 * As the pointer itself contains the address of the index (remember
6695 * index[1] == 1).
6696 *
6697 * Then to get the trace_array descriptor, by subtracting that index
6698 * from the ptr, we get to the start of the index itself.
6699 *
6700 * ptr - idx == &index[0]
6701 *
6702 * Then a simple container_of() from that pointer gets us to the
6703 * trace_array descriptor.
6704 */
6705static void get_tr_index(void *data, struct trace_array **ptr,
6706 unsigned int *pindex)
6707{
6708 *pindex = *(unsigned char *)data;
6709
6710 *ptr = container_of(data - *pindex, struct trace_array,
6711 trace_flags_index);
6712}
6713
Steven Rostedta8259072009-02-26 22:19:12 -05006714static ssize_t
6715trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6716 loff_t *ppos)
6717{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006718 void *tr_index = filp->private_data;
6719 struct trace_array *tr;
6720 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006721 char *buf;
6722
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006723 get_tr_index(tr_index, &tr, &index);
6724
6725 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006726 buf = "1\n";
6727 else
6728 buf = "0\n";
6729
6730 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6731}
6732
/*
 * Write handler for a core trace option file: accepts "0" or "1" and
 * sets/clears the flag bit via set_tracer_flag() under
 * trace_types_lock.  The trace_array and flag index are recovered
 * from the private data via get_tr_index().
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6763
Steven Rostedta8259072009-02-26 22:19:12 -05006764static const struct file_operations trace_options_core_fops = {
6765 .open = tracing_open_generic,
6766 .read = trace_options_core_read,
6767 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006768 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006769};
6770
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006771struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006772 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006773 struct dentry *parent,
6774 void *data,
6775 const struct file_operations *fops)
6776{
6777 struct dentry *ret;
6778
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006779 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006780 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07006781 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006782
6783 return ret;
6784}
6785
6786
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006787static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006788{
6789 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006790
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006791 if (tr->options)
6792 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006793
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006794 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006795 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05006796 return NULL;
6797
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006798 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006799 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006800 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05006801 return NULL;
6802 }
6803
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006804 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006805}
6806
Steven Rostedt577b7852009-02-26 23:43:05 -05006807static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006808create_trace_option_file(struct trace_array *tr,
6809 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006810 struct tracer_flags *flags,
6811 struct tracer_opt *opt)
6812{
6813 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006814
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006815 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006816 if (!t_options)
6817 return;
6818
6819 topt->flags = flags;
6820 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006821 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006822
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006823 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006824 &trace_options_fops);
6825
Steven Rostedt577b7852009-02-26 23:43:05 -05006826}
6827
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006828static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006829create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006830{
6831 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006832 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05006833 struct tracer_flags *flags;
6834 struct tracer_opt *opts;
6835 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006836 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05006837
6838 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006839 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05006840
6841 flags = tracer->flags;
6842
6843 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006844 return;
6845
6846 /*
6847 * If this is an instance, only create flags for tracers
6848 * the instance may have.
6849 */
6850 if (!trace_ok_for_array(tracer, tr))
6851 return;
6852
6853 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08006854 /* Make sure there's no duplicate flags. */
6855 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006856 return;
6857 }
Steven Rostedt577b7852009-02-26 23:43:05 -05006858
6859 opts = flags->opts;
6860
6861 for (cnt = 0; opts[cnt].name; cnt++)
6862 ;
6863
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006864 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006865 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006866 return;
6867
6868 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6869 GFP_KERNEL);
6870 if (!tr_topts) {
6871 kfree(topts);
6872 return;
6873 }
6874
6875 tr->topts = tr_topts;
6876 tr->topts[tr->nr_topts].tracer = tracer;
6877 tr->topts[tr->nr_topts].topts = topts;
6878 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05006879
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006880 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006881 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006882 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006883 WARN_ONCE(topts[cnt].entry == NULL,
6884 "Failed to create trace option: %s",
6885 opts[cnt].name);
6886 }
Steven Rostedt577b7852009-02-26 23:43:05 -05006887}
6888
Steven Rostedta8259072009-02-26 22:19:12 -05006889static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006890create_trace_option_core_file(struct trace_array *tr,
6891 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006892{
6893 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006894
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006895 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006896 if (!t_options)
6897 return NULL;
6898
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006899 return trace_create_file(option, 0644, t_options,
6900 (void *)&tr->trace_flags_index[index],
6901 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006902}
6903
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006904static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006905{
6906 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006907 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006908 int i;
6909
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006910 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006911 if (!t_options)
6912 return;
6913
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006914 for (i = 0; trace_options[i]; i++) {
6915 if (top_level ||
6916 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6917 create_trace_option_core_file(tr, trace_options[i], i);
6918 }
Steven Rostedta8259072009-02-26 22:19:12 -05006919}
6920
Steven Rostedt499e5472012-02-22 15:50:28 -05006921static ssize_t
6922rb_simple_read(struct file *filp, char __user *ubuf,
6923 size_t cnt, loff_t *ppos)
6924{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006925 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006926 char buf[64];
6927 int r;
6928
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006929 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006930 r = sprintf(buf, "%d\n", r);
6931
6932 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6933}
6934
/*
 * Write handler for "tracing_on": a non-zero value turns the ring buffer
 * on and invokes the current tracer's start callback; zero turns it off
 * and invokes the stop callback. Writing the current state is a no-op.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		/* Serialize against tracer changes while toggling. */
		mutex_lock(&trace_types_lock);
		/* Already in the requested state: skip the start/stop hooks. */
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	/* Always consume the write, even when nothing changed. */
	(*ppos)++;

	return cnt;
}
6968
/* File operations for the per-array "tracing_on" control file. */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6976
Steven Rostedt277ba042012-08-03 16:10:49 -04006977struct dentry *trace_instance_dir;
6978
6979static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006980init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04006981
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006982static int
6983allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006984{
6985 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006986
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006987 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006988
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006989 buf->tr = tr;
6990
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006991 buf->buffer = ring_buffer_alloc(size, rb_flags);
6992 if (!buf->buffer)
6993 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006994
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006995 buf->data = alloc_percpu(struct trace_array_cpu);
6996 if (!buf->data) {
6997 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)5dc4cd22017-12-26 20:07:34 -05006998 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006999 return -ENOMEM;
7000 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007001
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007002 /* Allocate the first page for all buffers */
7003 set_buffer_entries(&tr->trace_buffer,
7004 ring_buffer_size(tr->trace_buffer.buffer, 0));
7005
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007006 return 0;
7007}
7008
/*
 * Allocate the main trace buffer and, when CONFIG_TRACER_MAX_TRACE is
 * enabled, the snapshot (max) buffer for @tr. The snapshot buffer is
 * kept at minimal size (1) unless a boot-time snapshot was requested.
 * Returns 0 on success or -ENOMEM with everything freed again.
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		/* Undo the main buffer allocation above. */
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
7037
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007038static void free_trace_buffer(struct trace_buffer *buf)
7039{
7040 if (buf->buffer) {
7041 ring_buffer_free(buf->buffer);
7042 buf->buffer = NULL;
7043 free_percpu(buf->data);
7044 buf->data = NULL;
7045 }
7046}
7047
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007048static void free_trace_buffers(struct trace_array *tr)
7049{
7050 if (!tr)
7051 return;
7052
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007053 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007054
7055#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007056 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007057#endif
7058}
7059
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007060static void init_trace_flags_index(struct trace_array *tr)
7061{
7062 int i;
7063
7064 /* Used by the trace options files */
7065 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7066 tr->trace_flags_index[i] = i;
7067}
7068
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007069static void __update_tracer_options(struct trace_array *tr)
7070{
7071 struct tracer *t;
7072
7073 for (t = trace_types; t; t = t->next)
7074 add_tracer_options(tr, t);
7075}
7076
/*
 * Locked wrapper: take trace_types_lock and create option files for all
 * registered tracers on @tr.
 */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
7083
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007084static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007085{
Steven Rostedt277ba042012-08-03 16:10:49 -04007086 struct trace_array *tr;
7087 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04007088
7089 mutex_lock(&trace_types_lock);
7090
7091 ret = -EEXIST;
7092 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7093 if (tr->name && strcmp(tr->name, name) == 0)
7094 goto out_unlock;
7095 }
7096
7097 ret = -ENOMEM;
7098 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7099 if (!tr)
7100 goto out_unlock;
7101
7102 tr->name = kstrdup(name, GFP_KERNEL);
7103 if (!tr->name)
7104 goto out_free_tr;
7105
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007106 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7107 goto out_free_tr;
7108
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007109 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007110
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007111 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7112
Steven Rostedt277ba042012-08-03 16:10:49 -04007113 raw_spin_lock_init(&tr->start_lock);
7114
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007115 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7116
Steven Rostedt277ba042012-08-03 16:10:49 -04007117 tr->current_trace = &nop_trace;
7118
7119 INIT_LIST_HEAD(&tr->systems);
7120 INIT_LIST_HEAD(&tr->events);
7121
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007122 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04007123 goto out_free_tr;
7124
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007125 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007126 if (!tr->dir)
7127 goto out_free_tr;
7128
7129 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007130 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007131 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007132 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007133 }
Steven Rostedt277ba042012-08-03 16:10:49 -04007134
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007135 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007136 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007137 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04007138
7139 list_add(&tr->list, &ftrace_trace_arrays);
7140
7141 mutex_unlock(&trace_types_lock);
7142
7143 return 0;
7144
7145 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007146 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007147 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04007148 kfree(tr->name);
7149 kfree(tr);
7150
7151 out_unlock:
7152 mutex_unlock(&trace_types_lock);
7153
7154 return ret;
7155
7156}
7157
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007158static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007159{
7160 struct trace_array *tr;
7161 int found = 0;
7162 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007163 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007164
7165 mutex_lock(&trace_types_lock);
7166
7167 ret = -ENODEV;
7168 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7169 if (tr->name && strcmp(tr->name, name) == 0) {
7170 found = 1;
7171 break;
7172 }
7173 }
7174 if (!found)
7175 goto out_unlock;
7176
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007177 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007178 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007179 goto out_unlock;
7180
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007181 list_del(&tr->list);
7182
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007183 /* Disable all the flags that were enabled coming in */
7184 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7185 if ((1 << i) & ZEROED_TRACE_FLAGS)
7186 set_tracer_flag(tr, 1 << i, 0);
7187 }
7188
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05007189 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007190 event_trace_del_tracer(tr);
Namhyung Kim7da0f8e2017-04-17 11:44:27 +09007191 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007192 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08007193 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04007194 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007195
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007196 for (i = 0; i < tr->nr_topts; i++) {
7197 kfree(tr->topts[i].topts);
7198 }
7199 kfree(tr->topts);
7200
Chunyu Hu919e4812017-07-20 18:36:09 +08007201 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007202 kfree(tr->name);
7203 kfree(tr);
7204
7205 ret = 0;
7206
7207 out_unlock:
7208 mutex_unlock(&trace_types_lock);
7209
7210 return ret;
7211}
7212
Steven Rostedt277ba042012-08-03 16:10:49 -04007213static __init void create_trace_instances(struct dentry *d_tracer)
7214{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007215 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7216 instance_mkdir,
7217 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007218 if (WARN_ON(!trace_instance_dir))
7219 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007220}
7221
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007222static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007223init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007224{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007225 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007226
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05007227 trace_create_file("available_tracers", 0444, d_tracer,
7228 tr, &show_traces_fops);
7229
7230 trace_create_file("current_tracer", 0644, d_tracer,
7231 tr, &set_tracer_fops);
7232
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007233 trace_create_file("tracing_cpumask", 0644, d_tracer,
7234 tr, &tracing_cpumask_fops);
7235
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007236 trace_create_file("trace_options", 0644, d_tracer,
7237 tr, &tracing_iter_fops);
7238
7239 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007240 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007241
7242 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02007243 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007244
7245 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007246 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007247
7248 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7249 tr, &tracing_total_entries_fops);
7250
Wang YanQing238ae932013-05-26 16:52:01 +08007251 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007252 tr, &tracing_free_buffer_fops);
7253
7254 trace_create_file("trace_marker", 0220, d_tracer,
7255 tr, &tracing_mark_fops);
7256
7257 trace_create_file("trace_clock", 0644, d_tracer, tr,
7258 &trace_clock_fops);
7259
7260 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007261 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007262
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007263 create_trace_options_dir(tr);
7264
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007265#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05007266 trace_create_file("tracing_max_latency", 0644, d_tracer,
7267 &tr->max_latency, &tracing_max_lat_fops);
7268#endif
7269
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007270 if (ftrace_create_function_files(tr, d_tracer))
7271 WARN(1, "Could not allocate function filter files");
7272
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007273#ifdef CONFIG_TRACER_SNAPSHOT
7274 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007275 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007276#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007277
7278 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007279 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007280
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04007281 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007282}
7283
Eric W. Biedermand3381fa2017-02-01 06:06:16 +13007284static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007285{
7286 struct vfsmount *mnt;
7287 struct file_system_type *type;
7288
7289 /*
7290 * To maintain backward compatibility for tools that mount
7291 * debugfs to get to the tracing facility, tracefs is automatically
7292 * mounted to the debugfs/tracing directory.
7293 */
7294 type = get_fs_type("tracefs");
7295 if (!type)
7296 return NULL;
Eric W. Biedermand3381fa2017-02-01 06:06:16 +13007297 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007298 put_filesystem(type);
7299 if (IS_ERR(mnt))
7300 return NULL;
7301 mntget(mnt);
7302
7303 return mnt;
7304}
7305
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007306/**
7307 * tracing_init_dentry - initialize top level trace array
7308 *
7309 * This is called when creating files or directories in the tracing
7310 * directory. It is called via fs_initcall() by any of the boot up code
7311 * and expects to return the dentry of the top level tracing directory.
7312 */
7313struct dentry *tracing_init_dentry(void)
7314{
7315 struct trace_array *tr = &global_trace;
7316
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007317 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007318 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007319 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007320
Jiaxing Wang8b129192015-11-06 16:04:16 +08007321 if (WARN_ON(!tracefs_initialized()) ||
7322 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7323 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007324 return ERR_PTR(-ENODEV);
7325
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007326 /*
7327 * As there may still be users that expect the tracing
7328 * files to exist in debugfs/tracing, we must automount
7329 * the tracefs file system there, so older tools still
7330 * work with the newer kerenl.
7331 */
7332 tr->dir = debugfs_create_automount("tracing", NULL,
7333 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007334 if (!tr->dir) {
7335 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7336 return ERR_PTR(-ENOMEM);
7337 }
7338
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007339 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007340}
7341
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007342extern struct trace_enum_map *__start_ftrace_enum_maps[];
7343extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7344
7345static void __init trace_enum_init(void)
7346{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007347 int len;
7348
7349 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007350 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007351}
7352
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007353#ifdef CONFIG_MODULES
7354static void trace_module_add_enums(struct module *mod)
7355{
7356 if (!mod->num_trace_enums)
7357 return;
7358
7359 /*
7360 * Modules with bad taint do not have events created, do
7361 * not bother with enums either.
7362 */
7363 if (trace_module_has_bad_taint(mod))
7364 return;
7365
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007366 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007367}
7368
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007369#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * On module unload, find the chunk of enum map entries belonging to
 * @mod in the global trace_enum_maps list, unlink it and free it.
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	/*
	 * Walk the list one module-chunk at a time: each chunk starts
	 * with a head (holding the owning module) and ends with a tail
	 * whose next pointer links to the following chunk.
	 */
	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		/* Remember where to re-link if the next chunk is removed. */
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/* Splice this module's chunk out of the list, then free it. */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
7397#else
7398static inline void trace_module_remove_enums(struct module *mod) { }
7399#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7400
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007401static int trace_module_notify(struct notifier_block *self,
7402 unsigned long val, void *data)
7403{
7404 struct module *mod = data;
7405
7406 switch (val) {
7407 case MODULE_STATE_COMING:
7408 trace_module_add_enums(mod);
7409 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007410 case MODULE_STATE_GOING:
7411 trace_module_remove_enums(mod);
7412 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007413 }
7414
7415 return 0;
7416}
7417
/* Notifier block registering trace_module_notify() for module events. */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007422#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007423
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007424static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007425{
7426 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007427
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08007428 trace_access_lock_init();
7429
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007430 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007431 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09007432 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007433
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007434 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04007435 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007436
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007437 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007438 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007439
Li Zefan339ae5d2009-04-17 10:34:30 +08007440 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007441 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02007442
Avadh Patel69abe6a2009-04-10 16:04:48 -04007443 trace_create_file("saved_cmdlines", 0444, d_tracer,
7444 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007445
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007446 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7447 NULL, &tracing_saved_cmdlines_size_fops);
7448
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007449 trace_enum_init();
7450
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007451 trace_create_enum_file(d_tracer);
7452
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007453#ifdef CONFIG_MODULES
7454 register_module_notifier(&trace_module_nb);
7455#endif
7456
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007457#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007458 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7459 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007460#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007461
Steven Rostedt277ba042012-08-03 16:10:49 -04007462 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007463
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007464 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05007465
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01007466 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007467}
7468
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007469static int trace_panic_handler(struct notifier_block *this,
7470 unsigned long event, void *unused)
7471{
Steven Rostedt944ac422008-10-23 19:26:08 -04007472 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007473 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007474 return NOTIFY_OK;
7475}
7476
/* Notifier block hooking trace_panic_handler() into the panic path. */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
7482
7483static int trace_die_handler(struct notifier_block *self,
7484 unsigned long val,
7485 void *data)
7486{
7487 switch (val) {
7488 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007489 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007490 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007491 break;
7492 default:
7493 break;
7494 }
7495 return NOTIFY_OK;
7496}
7497
/* Notifier block hooking trace_die_handler() into the die chain. */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
7502
7503/*
7504 * printk is set to max of 1024, we really don't need it that big.
7505 * Nothing should be printing 1000 characters anyway.
7506 */
7507#define TRACE_MAX_PRINT 1000
7508
7509/*
7510 * Define here KERN_TRACE so that we have one place to modify
7511 * it if we decide to change what log level the ftrace dump
7512 * should be at.
7513 */
Steven Rostedt428aee12009-01-14 12:24:42 -05007514#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007515
Jason Wessel955b61e2010-08-05 09:22:23 -05007516void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007517trace_printk_seq(struct trace_seq *s)
7518{
7519 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007520 if (s->seq.len >= TRACE_MAX_PRINT)
7521 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007522
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05007523 /*
7524 * More paranoid code. Although the buffer size is set to
7525 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7526 * an extra layer of protection.
7527 */
7528 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7529 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007530
7531 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007532 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007533
7534 printk(KERN_TRACE "%s", s->buffer);
7535
Steven Rostedtf9520752009-03-02 14:04:40 -05007536 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007537}
7538
Jason Wessel955b61e2010-08-05 09:22:23 -05007539void trace_init_global_iter(struct trace_iterator *iter)
7540{
7541 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007542 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05007543 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007544 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07007545
7546 if (iter->trace && iter->trace->open)
7547 iter->trace->open(iter);
7548
7549 /* Annotate start of buffers if we had overruns */
7550 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7551 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7552
7553 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7554 if (trace_clocks[iter->tr->clock_id].in_ns)
7555 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05007556}
7557
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007558void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007559{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007560 /* use static because iter can be a bit big for the stack */
7561 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007562 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007563 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007564 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04007565 unsigned long flags;
7566 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007567
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007568 /* Only allow one dump user at a time. */
7569 if (atomic_inc_return(&dump_running) != 1) {
7570 atomic_dec(&dump_running);
7571 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04007572 }
7573
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007574 /*
7575 * Always turn off tracing when we dump.
7576 * We don't need to show trace output of what happens
7577 * between multiple crashes.
7578 *
7579 * If the user does a sysrq-z, then they can re-enable
7580 * tracing with echo 1 > tracing_on.
7581 */
7582 tracing_off();
7583
7584 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007585
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08007586 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05007587 trace_init_global_iter(&iter);
7588
Steven Rostedtd7690412008-10-01 00:29:53 -04007589 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05307590 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04007591 }
7592
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007593 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007594
Török Edwinb54d3de2008-11-22 13:28:48 +02007595 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007596 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02007597
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007598 switch (oops_dump_mode) {
7599 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05007600 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007601 break;
7602 case DUMP_ORIG:
7603 iter.cpu_file = raw_smp_processor_id();
7604 break;
7605 case DUMP_NONE:
7606 goto out_enable;
7607 default:
7608 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05007609 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007610 }
7611
7612 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007613
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007614 /* Did function tracer already get disabled? */
7615 if (ftrace_is_dead()) {
7616 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7617 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7618 }
7619
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007620 /*
7621 * We need to stop all tracing on all CPUS to read the
7622 * the next buffer. This is a bit expensive, but is
7623 * not done often. We fill all what we can read,
7624 * and then release the locks again.
7625 */
7626
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007627 while (!trace_empty(&iter)) {
7628
7629 if (!cnt)
7630 printk(KERN_TRACE "---------------------------------\n");
7631
7632 cnt++;
7633
Miguel Ojeda2e415392019-05-23 14:45:35 +02007634 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007635 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007636
Jason Wessel955b61e2010-08-05 09:22:23 -05007637 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08007638 int ret;
7639
7640 ret = print_trace_line(&iter);
7641 if (ret != TRACE_TYPE_NO_CONSUME)
7642 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007643 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05007644 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007645
7646 trace_printk_seq(&iter.seq);
7647 }
7648
7649 if (!cnt)
7650 printk(KERN_TRACE " (ftrace buffer empty)\n");
7651 else
7652 printk(KERN_TRACE "---------------------------------\n");
7653
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007654 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007655 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007656
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007657 for_each_tracing_cpu(cpu) {
7658 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007659 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007660 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04007661 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007662}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07007663EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007664
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007665__init static int tracer_alloc_buffers(void)
7666{
Steven Rostedt73c51622009-03-11 13:42:01 -04007667 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307668 int ret = -ENOMEM;
7669
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007670 /*
7671 * Make sure we don't accidently add more trace options
7672 * than we have bits for.
7673 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007674 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007675
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307676 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7677 goto out;
7678
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007679 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307680 goto out_free_buffer_mask;
7681
Steven Rostedt07d777f2011-09-22 14:01:55 -04007682 /* Only allocate trace_printk buffers if a trace_printk exists */
7683 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04007684 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04007685 trace_printk_init_buffers();
7686
Steven Rostedt73c51622009-03-11 13:42:01 -04007687 /* To save memory, keep the ring buffer size to its minimum */
7688 if (ring_buffer_expanded)
7689 ring_buf_size = trace_buf_size;
7690 else
7691 ring_buf_size = 1;
7692
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307693 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007694 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007695
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007696 raw_spin_lock_init(&global_trace.start_lock);
7697
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007698 /* Used for event triggers */
7699 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7700 if (!temp_buffer)
7701 goto out_free_cpumask;
7702
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007703 if (trace_create_savedcmd() < 0)
7704 goto out_free_temp_buffer;
7705
Steven Rostedtab464282008-05-12 21:21:00 +02007706 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007707 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007708 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7709 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007710 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007711 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04007712
Steven Rostedt499e5472012-02-22 15:50:28 -05007713 if (global_trace.buffer_disabled)
7714 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007715
Steven Rostedte1e232c2014-02-10 23:38:46 -05007716 if (trace_boot_clock) {
7717 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7718 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007719 pr_warn("Trace clock %s not defined, going back to default\n",
7720 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05007721 }
7722
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007723 /*
7724 * register_tracer() might reference current_trace, so it
7725 * needs to be set before we register anything. This is
7726 * just a bootstrap of current_trace anyway.
7727 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007728 global_trace.current_trace = &nop_trace;
7729
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007730 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7731
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05007732 ftrace_init_global_array_ops(&global_trace);
7733
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007734 init_trace_flags_index(&global_trace);
7735
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007736 register_tracer(&nop_trace);
7737
Steven Rostedt60a11772008-05-12 21:20:44 +02007738 /* All seems OK, enable tracing */
7739 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007740
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007741 atomic_notifier_chain_register(&panic_notifier_list,
7742 &trace_panic_notifier);
7743
7744 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007745
Steven Rostedtae63b312012-05-03 23:09:03 -04007746 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7747
7748 INIT_LIST_HEAD(&global_trace.systems);
7749 INIT_LIST_HEAD(&global_trace.events);
7750 list_add(&global_trace.list, &ftrace_trace_arrays);
7751
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08007752 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04007753
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007754 register_snapshot_cmd();
7755
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007756 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007757
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007758out_free_savedcmd:
7759 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007760out_free_temp_buffer:
7761 ring_buffer_free(temp_buffer);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307762out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007763 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307764out_free_buffer_mask:
7765 free_cpumask_var(tracing_buffer_mask);
7766out:
7767 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007768}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007769
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007770void __init trace_init(void)
7771{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05007772 if (tracepoint_printk) {
7773 tracepoint_print_iter =
7774 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7775 if (WARN_ON(!tracepoint_print_iter))
7776 tracepoint_printk = 0;
7777 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007778 tracer_alloc_buffers();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007779 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007780}
7781
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007782__init static int clear_boot_tracer(void)
7783{
7784 /*
7785 * The default tracer at boot buffer is an init section.
7786 * This function is called in lateinit. If we did not
7787 * find the boot tracer, then clear it out, to prevent
7788 * later registration from accessing the buffer that is
7789 * about to be freed.
7790 */
7791 if (!default_bootup_tracer)
7792 return 0;
7793
7794 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7795 default_bootup_tracer);
7796 default_bootup_tracer = NULL;
7797
7798 return 0;
7799}
7800
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007801fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)3170d9a2017-08-01 12:01:52 -04007802late_initcall_sync(clear_boot_tracer);