/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

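/*
 * Illustrative sketch (not part of the original file): kernel code can
 * also dump the buffers directly through ftrace_dump() from
 * <linux/kernel.h> when it detects a fatal condition. The function
 * example_fatal() below is hypothetical and only shows the call.
 */
#if 0
static void example_fatal(void)
{
	/* Dump the ftrace buffers of all CPUs to the console */
	ftrace_dump(DUMP_ALL);
	panic("example: unrecoverable state");
}
#endif
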
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS	(TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

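/*
 * Illustrative sketch (not part of the original file): code that needs
 * to hold a trace_array across an operation pairs trace_array_get()
 * with trace_array_put(), the way the tracing file open/release paths
 * in this file do. The helper do_something_with() is hypothetical.
 */
#if 0
static int example_use_trace_array(struct trace_array *tr)
{
	int ret;

	/* Fails with -ENODEV if tr is no longer on ftrace_trace_arrays */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = do_something_with(tr);

	trace_array_put(tr);
	return ret;
}
#endif
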
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

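/*
 * Illustrative sketch (not part of the original file): the three
 * helpers above are shaped to slot straight into a seq_file, which is
 * how the pid filter files use them. All "example_" names below are
 * hypothetical.
 */
#if 0
static struct trace_pid_list *example_pid_list;

static void *example_start(struct seq_file *m, loff_t *pos)
{
	if (!example_pid_list)
		return NULL;
	return trace_pid_start(example_pid_list, pos);
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pid_sops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= trace_pid_show,	/* prints one "pid\n" per entry */
};
#endif
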
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

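/*
 * Illustrative sketch (not part of the original file): a write handler
 * feeds user input straight into trace_pid_write() and only swaps the
 * published list in on success, so a failed write leaves the old
 * filter intact. Locking and RCU publication are elided here;
 * example_pid_list and example_write() are hypothetical.
 */
#if 0
static struct trace_pid_list *example_pid_list;

static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *old = example_pid_list;
	struct trace_pid_list *new_list = NULL;
	ssize_t ret;

	ret = trace_pid_write(old, &new_list, ubuf, cnt);
	if (ret < 0)
		return ret;

	example_pid_list = new_list;	/* real users publish via RCU */
	if (old)
		trace_free_pid_list(old);

	if (ret > 0)
		*ppos += ret;
	return ret;
}
#endif
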
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

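/*
 * Illustrative sketch (not part of the original file): a consuming
 * reader in this file brackets its buffer access with the primitives
 * above, passing either a single cpu or RING_BUFFER_ALL_CPUS;
 * consume_one_event() is a hypothetical helper.
 */
#if 0
static void example_consume(struct trace_iterator *iter, int cpu)
{
	trace_access_lock(cpu);		/* excludes RING_BUFFER_ALL_CPUS readers */
	consume_one_event(iter, cpu);
	trace_access_unlock(cpu);
}
#endif
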
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

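/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach __trace_puts()/__trace_bputs() through the trace_puts() macro
 * from <linux/kernel.h>, which supplies the caller's _THIS_IP_ and
 * picks the bputs variant for true string constants.
 */
#if 0
static void example_hook(void)
{
	trace_puts("example_hook: reached\n");	/* one entry in the ring buffer */
}
#endif
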
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

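/*
 * Illustrative sketch (not part of the original file): the intended
 * split is to allocate the snapshot buffer once from sleepable
 * context, then trigger tracing_snapshot() from the (possibly atomic)
 * spot where the interesting condition fires. Both example functions
 * below are hypothetical.
 */
#if 0
static int __init example_setup(void)
{
	return tracing_alloc_snapshot();	/* may sleep; do it up front */
}

static void example_condition_hit(void)
{
	tracing_snapshot();	/* atomic-safe: just swaps the buffers */
}
#endif
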
/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

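/*
 * Illustrative sketch (not part of the original file): tracing_off() is
 * the usual way for kernel code to freeze the ring buffer the moment a
 * problem is detected, so the events leading up to it are preserved for
 * inspection through tracefs; looks_wrong() is a hypothetical predicate.
 */
#if 0
static void example_check(void)
{
	if (looks_wrong()) {
		tracing_off();	/* stop recording; buffer contents remain */
		pr_warn("example: tracing stopped for post-mortem\n");
	}
}
#endif
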
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

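/*
 * Illustrative sketch (not part of the original file): the canonical
 * trace_parser loop, the same shape trace_pid_write() uses above -
 * initialize once, pull whitespace-separated tokens with
 * trace_get_user(), then release. handle_token() is hypothetical.
 */
#if 0
static ssize_t example_parse(const char __user *ubuf, size_t cnt)
{
	struct trace_parser parser;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;
		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;	/* NUL-terminate the token */
		handle_token(parser.buffer);
		trace_parser_clear(&parser);
	}
	trace_parser_put(&parser);
	return read;
}
#endif
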
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

Rabin Vincente30f53a2014-11-10 19:46:34 +01001388static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001389{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001390 /* Iterators are static, they should be filled or empty */
1391 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001392 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001393
Rabin Vincente30f53a2014-11-10 19:46:34 +01001394 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1395 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001396}
1397
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001398#ifdef CONFIG_FTRACE_STARTUP_TEST
1399static int run_tracer_selftest(struct tracer *type)
1400{
1401 struct trace_array *tr = &global_trace;
1402 struct tracer *saved_tracer = tr->current_trace;
1403 int ret;
1404
1405 if (!type->selftest || tracing_selftest_disabled)
1406 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001407
1408 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001409 * Run a selftest on this tracer.
1410 * Here we reset the trace buffer, and set the current
1411 * tracer to be this tracer. The tracer can then run some
1412 * internal tracing to verify that everything is in order.
1413 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001414 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001415 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001416
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001417 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001418
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001419#ifdef CONFIG_TRACER_MAX_TRACE
1420 if (type->use_max_tr) {
1421 /* If we expanded the buffers, make sure the max is expanded too */
1422 if (ring_buffer_expanded)
1423 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1424 RING_BUFFER_ALL_CPUS);
1425 tr->allocated_snapshot = true;
1426 }
1427#endif
1428
1429 /* the test is responsible for initializing and enabling */
1430 pr_info("Testing tracer %s: ", type->name);
1431 ret = type->selftest(type, tr);
1432 /* the test is responsible for resetting too */
1433 tr->current_trace = saved_tracer;
1434 if (ret) {
1435 printk(KERN_CONT "FAILED!\n");
1436 /* Add the warning after printing 'FAILED' */
1437 WARN_ON(1);
1438 return -1;
1439 }
1440 /* Only reset on passing, to avoid touching corrupted buffers */
1441 tracing_reset_online_cpus(&tr->trace_buffer);
1442
1443#ifdef CONFIG_TRACER_MAX_TRACE
1444 if (type->use_max_tr) {
1445 tr->allocated_snapshot = false;
1446
1447 /* Shrink the max buffer again */
1448 if (ring_buffer_expanded)
1449 ring_buffer_resize(tr->max_buffer.buffer, 1,
1450 RING_BUFFER_ALL_CPUS);
1451 }
1452#endif
1453
1454 printk(KERN_CONT "PASSED\n");
1455 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001456}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001457#else
1458static inline int run_tracer_selftest(struct tracer *type)
1459{
1460 return 0;
1461}
1462#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001463
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001464static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1465
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001466static void __init apply_trace_boot_options(void);
1467
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001468/**
1469 * register_tracer - register a tracer with the ftrace system.
 1470 * @type: the plugin for the tracer
1471 *
1472 * Register a new plugin tracer.
1473 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001474int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475{
1476 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001477 int ret = 0;
1478
1479 if (!type->name) {
1480 pr_info("Tracer must have a name\n");
1481 return -1;
1482 }
1483
Dan Carpenter24a461d2010-07-10 12:06:44 +02001484 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001485 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1486 return -1;
1487 }
1488
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001489 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001490
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001491 tracing_selftest_running = true;
1492
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493 for (t = trace_types; t; t = t->next) {
1494 if (strcmp(type->name, t->name) == 0) {
1495 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001496 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497 type->name);
1498 ret = -1;
1499 goto out;
1500 }
1501 }
1502
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001503 if (!type->set_flag)
1504 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001505 if (!type->flags) {
 1506 /* allocate a dummy tracer_flags */
1507 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001508 if (!type->flags) {
1509 ret = -ENOMEM;
1510 goto out;
1511 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001512 type->flags->val = 0;
1513 type->flags->opts = dummy_tracer_opt;
1514 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001515 if (!type->flags->opts)
1516 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001517
Chunyu Hud39cdd22016-03-08 21:37:01 +08001518 /* store the tracer for __set_tracer_option */
1519 type->flags->trace = type;
1520
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001521 ret = run_tracer_selftest(type);
1522 if (ret < 0)
1523 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001524
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525 type->next = trace_types;
1526 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001527 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001528
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001529 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001530 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001531 mutex_unlock(&trace_types_lock);
1532
Steven Rostedtdac74942009-02-05 01:13:38 -05001533 if (ret || !default_bootup_tracer)
1534 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001535
Li Zefanee6c2c12009-09-18 14:06:47 +08001536 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001537 goto out_unlock;
1538
1539 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1540 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001541 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001542 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001543
1544 apply_trace_boot_options();
1545
Steven Rostedtdac74942009-02-05 01:13:38 -05001546 /* disable other selftests, since this will break them */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001547 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001548#ifdef CONFIG_FTRACE_STARTUP_TEST
1549 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1550 type->name);
1551#endif
1552
1553 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001554 return ret;
1555}
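/*
 * Illustrative sketch, not part of the original file: a minimal tracer
 * registration. The "example" tracer and its callbacks are hypothetical;
 * a real tracer wires .init/.reset to start and stop its probes.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
        return 0;               /* arm probes here */
}

static void example_tracer_reset(struct trace_array *tr)
{
        /* disarm probes here */
}

static struct tracer example_tracer __read_mostly = {
        .name   = "example",    /* must be shorter than MAX_TRACER_SIZE */
        .init   = example_tracer_init,
        .reset  = example_tracer_reset,
};

static __init int example_tracer_register(void)
{
        /* returns -1 if the name is missing, too long, or already taken */
        return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif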
1556
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001557void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001558{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001559 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001560
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001561 if (!buffer)
1562 return;
1563
Steven Rostedtf6339032009-09-04 12:35:16 -04001564 ring_buffer_record_disable(buffer);
1565
1566 /* Make sure all commits have finished */
1567 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001568 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001569
1570 ring_buffer_record_enable(buffer);
1571}
1572
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001573void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001574{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001575 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001576 int cpu;
1577
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001578 if (!buffer)
1579 return;
1580
Steven Rostedt621968c2009-09-04 12:02:35 -04001581 ring_buffer_record_disable(buffer);
1582
1583 /* Make sure all commits have finished */
1584 synchronize_sched();
1585
Alexander Z Lam94571582013-08-02 18:36:16 -07001586 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001587
1588 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001589 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001590
1591 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001592}
1593
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001594/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001595void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001596{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001597 struct trace_array *tr;
1598
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001599 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001600 tracing_reset_online_cpus(&tr->trace_buffer);
1601#ifdef CONFIG_TRACER_MAX_TRACE
1602 tracing_reset_online_cpus(&tr->max_buffer);
1603#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001604 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001605}
1606
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001607#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001608#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001609static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001610struct saved_cmdlines_buffer {
1611 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1612 unsigned *map_cmdline_to_pid;
1613 unsigned cmdline_num;
1614 int cmdline_idx;
1615 char *saved_cmdlines;
1616};
1617static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001618
Steven Rostedt25b0b442008-05-12 21:21:00 +02001619/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001620static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001621
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001622static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001623{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001624 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1625}
1626
1627static inline void set_cmdline(int idx, const char *cmdline)
1628{
1629 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1630}
1631
1632static int allocate_cmdlines_buffer(unsigned int val,
1633 struct saved_cmdlines_buffer *s)
1634{
1635 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1636 GFP_KERNEL);
1637 if (!s->map_cmdline_to_pid)
1638 return -ENOMEM;
1639
1640 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1641 if (!s->saved_cmdlines) {
1642 kfree(s->map_cmdline_to_pid);
1643 return -ENOMEM;
1644 }
1645
1646 s->cmdline_idx = 0;
1647 s->cmdline_num = val;
1648 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1649 sizeof(s->map_pid_to_cmdline));
1650 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1651 val * sizeof(*s->map_cmdline_to_pid));
1652
1653 return 0;
1654}
1655
1656static int trace_create_savedcmd(void)
1657{
1658 int ret;
1659
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001660 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001661 if (!savedcmd)
1662 return -ENOMEM;
1663
1664 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1665 if (ret < 0) {
1666 kfree(savedcmd);
1667 savedcmd = NULL;
1668 return -ENOMEM;
1669 }
1670
1671 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001672}
1673
Carsten Emdeb5130b12009-09-13 01:43:07 +02001674int is_tracing_stopped(void)
1675{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001676 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001677}
1678
Steven Rostedt0f048702008-11-05 16:05:44 -05001679/**
1680 * tracing_start - quick start of the tracer
1681 *
1682 * If tracing is enabled but was stopped by tracing_stop,
1683 * this will start the tracer back up.
1684 */
1685void tracing_start(void)
1686{
1687 struct ring_buffer *buffer;
1688 unsigned long flags;
1689
1690 if (tracing_disabled)
1691 return;
1692
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001693 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1694 if (--global_trace.stop_count) {
1695 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001696 /* Someone screwed up their debugging */
1697 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001698 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001699 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001700 goto out;
1701 }
1702
Steven Rostedta2f80712010-03-12 19:56:00 -05001703 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001704 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001705
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001706 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001707 if (buffer)
1708 ring_buffer_record_enable(buffer);
1709
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001710#ifdef CONFIG_TRACER_MAX_TRACE
1711 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001712 if (buffer)
1713 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001714#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001715
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001716 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001717
Steven Rostedt0f048702008-11-05 16:05:44 -05001718 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001719 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1720}
1721
1722static void tracing_start_tr(struct trace_array *tr)
1723{
1724 struct ring_buffer *buffer;
1725 unsigned long flags;
1726
1727 if (tracing_disabled)
1728 return;
1729
1730 /* If global, we need to also start the max tracer */
1731 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1732 return tracing_start();
1733
1734 raw_spin_lock_irqsave(&tr->start_lock, flags);
1735
1736 if (--tr->stop_count) {
1737 if (tr->stop_count < 0) {
1738 /* Someone screwed up their debugging */
1739 WARN_ON_ONCE(1);
1740 tr->stop_count = 0;
1741 }
1742 goto out;
1743 }
1744
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001745 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001746 if (buffer)
1747 ring_buffer_record_enable(buffer);
1748
1749 out:
1750 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001751}
1752
1753/**
1754 * tracing_stop - quick stop of the tracer
1755 *
 1756 * Lightweight way to stop tracing. Use in conjunction with
1757 * tracing_start.
1758 */
1759void tracing_stop(void)
1760{
1761 struct ring_buffer *buffer;
1762 unsigned long flags;
1763
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001764 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1765 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001766 goto out;
1767
Steven Rostedta2f80712010-03-12 19:56:00 -05001768 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001769 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001770
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001771 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001772 if (buffer)
1773 ring_buffer_record_disable(buffer);
1774
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001775#ifdef CONFIG_TRACER_MAX_TRACE
1776 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001777 if (buffer)
1778 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001779#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001780
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001781 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001782
Steven Rostedt0f048702008-11-05 16:05:44 -05001783 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001784 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1785}
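/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of tracing_stop()/tracing_start(). stop_count makes the
 * pair nest, so each stop must be matched by exactly one start.
 */
#if 0
static void example_freeze_and_inspect(void)
{
        tracing_stop();         /* recording stops when stop_count goes 0->1 */
        /* ... read the now-quiescent buffers here ... */
        tracing_start();        /* recording resumes when stop_count hits 0 */
}
#endif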
1786
1787static void tracing_stop_tr(struct trace_array *tr)
1788{
1789 struct ring_buffer *buffer;
1790 unsigned long flags;
1791
1792 /* If global, we need to also stop the max tracer */
1793 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1794 return tracing_stop();
1795
1796 raw_spin_lock_irqsave(&tr->start_lock, flags);
1797 if (tr->stop_count++)
1798 goto out;
1799
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001800 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001801 if (buffer)
1802 ring_buffer_record_disable(buffer);
1803
1804 out:
1805 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001806}
1807
Ingo Molnare309b412008-05-12 21:20:51 +02001808void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001809
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001810static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001811{
Carsten Emdea635cf02009-03-18 09:00:41 +01001812 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001813
1814 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001815 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001816
1817 /*
1818 * It's not the end of the world if we don't get
1819 * the lock, but we also don't want to spin
1820 * nor do we want to disable interrupts,
1821 * so if we miss here, then better luck next time.
1822 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001823 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001824 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001825
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001826 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001827 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001828 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001829
Carsten Emdea635cf02009-03-18 09:00:41 +01001830 /*
1831 * Check whether the cmdline buffer at idx has a pid
1832 * mapped. We are going to overwrite that entry so we
1833 * need to clear the map_pid_to_cmdline. Otherwise we
1834 * would read the new comm for the old pid.
1835 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001836 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001837 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001838 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001839
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001840 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1841 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001842
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001843 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001844 }
1845
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001846 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001847
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001848 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001849
1850 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001851}
1852
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001853static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001854{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001855 unsigned map;
1856
Steven Rostedt4ca530852009-03-16 19:20:15 -04001857 if (!pid) {
1858 strcpy(comm, "<idle>");
1859 return;
1860 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001861
Steven Rostedt74bf4072010-01-25 15:11:53 -05001862 if (WARN_ON_ONCE(pid < 0)) {
1863 strcpy(comm, "<XXX>");
1864 return;
1865 }
1866
Steven Rostedt4ca530852009-03-16 19:20:15 -04001867 if (pid > PID_MAX_DEFAULT) {
1868 strcpy(comm, "<...>");
1869 return;
1870 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001871
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001872 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001873 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001874 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001875 else
1876 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001877}
1878
1879void trace_find_cmdline(int pid, char comm[])
1880{
1881 preempt_disable();
1882 arch_spin_lock(&trace_cmdline_lock);
1883
1884 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001885
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001886 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001887 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001888}
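/*
 * Illustrative sketch, not part of the original file: resolving a pid
 * recorded in an entry back to a command name; the helper copies
 * "<...>" when the pid was never saved.
 */
#if 0
static void example_print_comm(int pid)
{
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(pid, comm);
        pr_info("pid %d ran as %s\n", pid, comm);
}
#endif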
1889
Ingo Molnare309b412008-05-12 21:20:51 +02001890void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001891{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001892 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001893 return;
1894
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001895 if (!__this_cpu_read(trace_cmdline_save))
1896 return;
1897
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001898 if (trace_save_cmdline(tsk))
1899 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001900}
1901
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001902void
Steven Rostedt38697052008-10-01 13:14:09 -04001903tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1904 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001905{
1906 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001907
Steven Rostedt777e2082008-09-29 23:02:42 -04001908 entry->preempt_count = pc & 0xff;
1909 entry->pid = (tsk) ? tsk->pid : 0;
1910 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001911#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001912 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001913#else
1914 TRACE_FLAG_IRQS_NOSUPPORT |
1915#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01001916 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001917 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondeti04e002a2016-12-09 21:50:17 +05301918 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001919 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1920 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001921}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001922EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
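/*
 * Illustrative sketch, not part of the original file: how an entry
 * header is filled before event data is written, mirroring the callers
 * of the helper above. TRACE_PRINT stands in for any valid entry type.
 */
#if 0
static void example_fill_header(struct trace_entry *ent)
{
        unsigned long flags;

        local_save_flags(flags);
        tracing_generic_entry_update(ent, flags, preempt_count());
        ent->type = TRACE_PRINT;
}
#endif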
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001923
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001924static __always_inline void
1925trace_event_setup(struct ring_buffer_event *event,
1926 int type, unsigned long flags, int pc)
1927{
1928 struct trace_entry *ent = ring_buffer_event_data(event);
1929
1930 tracing_generic_entry_update(ent, flags, pc);
1931 ent->type = type;
1932}
1933
Steven Rostedte77405a2009-09-02 14:17:06 -04001934struct ring_buffer_event *
1935trace_buffer_lock_reserve(struct ring_buffer *buffer,
1936 int type,
1937 unsigned long len,
1938 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001939{
1940 struct ring_buffer_event *event;
1941
Steven Rostedte77405a2009-09-02 14:17:06 -04001942 event = ring_buffer_lock_reserve(buffer, len);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001943 if (event != NULL)
1944 trace_event_setup(event, type, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001945
1946 return event;
1947}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001948
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001949DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1950DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1951static int trace_buffered_event_ref;
1952
1953/**
1954 * trace_buffered_event_enable - enable buffering events
1955 *
1956 * When events are being filtered, it is quicker to use a temporary
1957 * buffer to write the event data into if there's a likely chance
1958 * that it will not be committed. The discard of the ring buffer
1959 * is not as fast as committing, and is much slower than copying
1960 * a commit.
1961 *
1962 * When an event is to be filtered, allocate per cpu buffers to
1963 * write the event data into, and if the event is filtered and discarded
1964 * it is simply dropped, otherwise, the entire data is to be committed
1965 * in one shot.
1966 */
1967void trace_buffered_event_enable(void)
1968{
1969 struct ring_buffer_event *event;
1970 struct page *page;
1971 int cpu;
1972
1973 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1974
1975 if (trace_buffered_event_ref++)
1976 return;
1977
1978 for_each_tracing_cpu(cpu) {
1979 page = alloc_pages_node(cpu_to_node(cpu),
1980 GFP_KERNEL | __GFP_NORETRY, 0);
1981 if (!page)
1982 goto failed;
1983
1984 event = page_address(page);
1985 memset(event, 0, sizeof(*event));
1986
1987 per_cpu(trace_buffered_event, cpu) = event;
1988
1989 preempt_disable();
1990 if (cpu == smp_processor_id() &&
1991 this_cpu_read(trace_buffered_event) !=
1992 per_cpu(trace_buffered_event, cpu))
1993 WARN_ON_ONCE(1);
1994 preempt_enable();
1995 }
1996
1997 return;
1998 failed:
1999 trace_buffered_event_disable();
2000}
2001
2002static void enable_trace_buffered_event(void *data)
2003{
2004 /* Probably not needed, but do it anyway */
2005 smp_rmb();
2006 this_cpu_dec(trace_buffered_event_cnt);
2007}
2008
2009static void disable_trace_buffered_event(void *data)
2010{
2011 this_cpu_inc(trace_buffered_event_cnt);
2012}
2013
2014/**
2015 * trace_buffered_event_disable - disable buffering events
2016 *
2017 * When a filter is removed, it is faster to not use the buffered
2018 * events, and to commit directly into the ring buffer. Free up
2019 * the temp buffers when there are no more users. This requires
2020 * special synchronization with current events.
2021 */
2022void trace_buffered_event_disable(void)
2023{
2024 int cpu;
2025
2026 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2027
2028 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2029 return;
2030
2031 if (--trace_buffered_event_ref)
2032 return;
2033
2034 preempt_disable();
2035 /* For each CPU, set the buffer as used. */
2036 smp_call_function_many(tracing_buffer_mask,
2037 disable_trace_buffered_event, NULL, 1);
2038 preempt_enable();
2039
2040 /* Wait for all current users to finish */
2041 synchronize_sched();
2042
2043 for_each_tracing_cpu(cpu) {
2044 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2045 per_cpu(trace_buffered_event, cpu) = NULL;
2046 }
2047 /*
2048 * Make sure trace_buffered_event is NULL before clearing
2049 * trace_buffered_event_cnt.
2050 */
2051 smp_wmb();
2052
2053 preempt_disable();
2054 /* Do the work on each cpu */
2055 smp_call_function_many(tracing_buffer_mask,
2056 enable_trace_buffered_event, NULL, 1);
2057 preempt_enable();
2058}
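/*
 * Illustrative sketch, not part of the original file: the reference-
 * counted pairing of the two calls above. Both sides must hold
 * event_mutex, which is what their WARN_ON_ONCE()s assert.
 */
#if 0
static void example_set_event_filtered(bool filtered)
{
        mutex_lock(&event_mutex);
        if (filtered)
                trace_buffered_event_enable();  /* per-cpu pages on 0->1 */
        else
                trace_buffered_event_disable(); /* freed again on 1->0 */
        mutex_unlock(&event_mutex);
}
#endif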
2059
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002060void
2061__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
2062{
2063 __this_cpu_write(trace_cmdline_save, true);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002064
2065 /* If this is the temp buffer, we need to commit fully */
2066 if (this_cpu_read(trace_buffered_event) == event) {
2067 /* Length is in event->array[0] */
2068 ring_buffer_write(buffer, event->array[0], &event->array[1]);
2069 /* Release the temp buffer */
2070 this_cpu_dec(trace_buffered_event_cnt);
2071 } else
2072 ring_buffer_unlock_commit(buffer, event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002073}
2074
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002075static struct ring_buffer *temp_buffer;
2076
Steven Rostedtef5580d2009-02-27 19:38:04 -05002077struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002078trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002079 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002080 int type, unsigned long len,
2081 unsigned long flags, int pc)
2082{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002083 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002084 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002085
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002086 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002087
2088 if ((trace_file->flags &
2089 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2090 (entry = this_cpu_read(trace_buffered_event))) {
2091 /* Try to use the per cpu buffer first */
2092 val = this_cpu_inc_return(trace_buffered_event_cnt);
2093 if (val == 1) {
2094 trace_event_setup(entry, type, flags, pc);
2095 entry->array[0] = len;
2096 return entry;
2097 }
2098 this_cpu_dec(trace_buffered_event_cnt);
2099 }
2100
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002101 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002102 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002103 /*
 2104 * If tracing is off, but we have triggers enabled,
 2105 * we still need to look at the event data. Use the temp_buffer
 2106 * to store the trace event for the trigger to use. It's recursion
 2107 * safe and will not be recorded anywhere.
2108 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002109 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002110 *current_rb = temp_buffer;
2111 entry = trace_buffer_lock_reserve(*current_rb,
2112 type, len, flags, pc);
2113 }
2114 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002115}
2116EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2117
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002118void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2119 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002120 struct ring_buffer_event *event,
2121 unsigned long flags, int pc,
2122 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002123{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002124 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002125
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002126 /*
2127 * If regs is not set, then skip the following callers:
2128 * trace_buffer_unlock_commit_regs
2129 * event_trigger_unlock_commit
2130 * trace_event_buffer_commit
2131 * trace_event_raw_event_sched_switch
2132 * Note, we can still get here via blktrace, wakeup tracer
2133 * and mmiotrace, but that's ok if they lose a function or
 2134 * two. They are not that meaningful.
2135 */
2136 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002137 ftrace_trace_userstack(buffer, flags, pc);
2138}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002139
Ingo Molnare309b412008-05-12 21:20:51 +02002140void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002141trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002142 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2143 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002144{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002145 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002146 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002147 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002148 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002149
Steven Rostedte77405a2009-09-02 14:17:06 -04002150 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002151 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002152 if (!event)
2153 return;
2154 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002155 entry->ip = ip;
2156 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002157
Tom Zanussif306cc82013-10-24 08:34:17 -05002158 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002159 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002160}
2161
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002162#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002163
2164#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2165struct ftrace_stack {
2166 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2167};
2168
2169static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2170static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2171
Steven Rostedte77405a2009-09-02 14:17:06 -04002172static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002173 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002174 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002175{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002176 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002177 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002178 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002179 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002180 int use_stack;
2181 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002182
2183 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002184 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002185
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002186 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002187 * Add two, for this function and the call to save_stack_trace()
2188 * If regs is set, then these functions will not be in the way.
2189 */
2190 if (!regs)
2191 trace.skip += 2;
2192
2193 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002194 * Since events can happen in NMIs there's no safe way to
2195 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2196 * or NMI comes in, it will just have to use the default
 2197 * FTRACE_STACK_ENTRIES.
2198 */
2199 preempt_disable_notrace();
2200
Shan Wei82146522012-11-19 13:21:01 +08002201 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002202 /*
2203 * We don't need any atomic variables, just a barrier.
2204 * If an interrupt comes in, we don't care, because it would
2205 * have exited and put the counter back to what we want.
2206 * We just need a barrier to keep gcc from moving things
2207 * around.
2208 */
2209 barrier();
2210 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002211 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002212 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2213
2214 if (regs)
2215 save_stack_trace_regs(regs, &trace);
2216 else
2217 save_stack_trace(&trace);
2218
2219 if (trace.nr_entries > size)
2220 size = trace.nr_entries;
2221 } else
2222 /* From now on, use_stack is a boolean */
2223 use_stack = 0;
2224
2225 size *= sizeof(unsigned long);
2226
2227 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
2228 sizeof(*entry) + size, flags, pc);
2229 if (!event)
2230 goto out;
2231 entry = ring_buffer_event_data(event);
2232
2233 memset(&entry->caller, 0, size);
2234
2235 if (use_stack)
2236 memcpy(&entry->caller, trace.entries,
2237 trace.nr_entries * sizeof(unsigned long));
2238 else {
2239 trace.max_entries = FTRACE_STACK_ENTRIES;
2240 trace.entries = entry->caller;
2241 if (regs)
2242 save_stack_trace_regs(regs, &trace);
2243 else
2244 save_stack_trace(&trace);
2245 }
2246
2247 entry->size = trace.nr_entries;
2248
Tom Zanussif306cc82013-10-24 08:34:17 -05002249 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002250 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002251
2252 out:
2253 /* Again, don't let gcc optimize things here */
2254 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002255 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002256 preempt_enable_notrace();
2257
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002258}
2259
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002260static inline void ftrace_trace_stack(struct trace_array *tr,
2261 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002262 unsigned long flags,
2263 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002264{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002265 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002266 return;
2267
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002268 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002269}
2270
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002271void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2272 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002273{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002274 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002275}
2276
Steven Rostedt03889382009-12-11 09:48:22 -05002277/**
2278 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002279 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002280 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002281void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002282{
2283 unsigned long flags;
2284
2285 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002286 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002287
2288 local_save_flags(flags);
2289
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002290 /*
 2291 * Skip 3 more frames; that seems to get us to the caller
 2292 * of this function.
2293 */
2294 skip += 3;
2295 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2296 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002297}
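/*
 * Illustrative sketch, not part of the original file: dumping the
 * current kernel stack into the trace buffer from a hypothetical
 * debug helper; skip=1 hides the helper's own frame.
 */
#if 0
static void example_trace_my_caller(void)
{
        trace_dump_stack(1);
}
#endif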
2298
Steven Rostedt91e86e52010-11-10 12:56:12 +01002299static DEFINE_PER_CPU(int, user_stack_count);
2300
Steven Rostedte77405a2009-09-02 14:17:06 -04002301void
2302ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002303{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002304 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002305 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002306 struct userstack_entry *entry;
2307 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002308
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002309 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002310 return;
2311
Steven Rostedtb6345872010-03-12 20:03:30 -05002312 /*
 2313 * NMIs cannot handle page faults, even with fixups.
 2314 * Saving the user stack can (and often does) fault.
2315 */
2316 if (unlikely(in_nmi()))
2317 return;
2318
Steven Rostedt91e86e52010-11-10 12:56:12 +01002319 /*
2320 * prevent recursion, since the user stack tracing may
2321 * trigger other kernel events.
2322 */
2323 preempt_disable();
2324 if (__this_cpu_read(user_stack_count))
2325 goto out;
2326
2327 __this_cpu_inc(user_stack_count);
2328
Steven Rostedte77405a2009-09-02 14:17:06 -04002329 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002330 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002331 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002332 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002333 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002334
Steven Rostedt48659d32009-09-11 11:36:23 -04002335 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002336 memset(&entry->caller, 0, sizeof(entry->caller));
2337
2338 trace.nr_entries = 0;
2339 trace.max_entries = FTRACE_STACK_ENTRIES;
2340 trace.skip = 0;
2341 trace.entries = entry->caller;
2342
2343 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002344 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002345 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002346
Li Zefan1dbd1952010-12-09 15:47:56 +08002347 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002348 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002349 out:
2350 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002351}
2352
Hannes Eder4fd27352009-02-10 19:44:12 +01002353#ifdef UNUSED
2354static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002355{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002356 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002357}
Hannes Eder4fd27352009-02-10 19:44:12 +01002358#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002359
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002360#endif /* CONFIG_STACKTRACE */
2361
Steven Rostedt07d777f2011-09-22 14:01:55 -04002362/* created for use with alloc_percpu */
2363struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002364 int nesting;
2365 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002366};
2367
2368static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002369
2370/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002371 * This allows for lockless recording. If we're nested too deeply, then
2372 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002373 */
2374static char *get_trace_buf(void)
2375{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002376 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002377
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002378 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002379 return NULL;
2380
Steven Rostedt (VMware)96cf9182017-09-05 11:32:01 -04002381 buffer->nesting++;
2382
2383 /* Interrupts must see nesting incremented before we use the buffer */
2384 barrier();
2385 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002386}
2387
2388static void put_trace_buf(void)
2389{
Steven Rostedt (VMware)96cf9182017-09-05 11:32:01 -04002390 /* Don't let the decrement of nesting leak before this */
2391 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002392 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002393}
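/*
 * Illustrative sketch, not part of the original file: the pattern the
 * two helpers above implement. The caller disables preemption (as
 * trace_vbprintk() below does) so the per-cpu buffer and its nesting
 * count stay on one CPU.
 */
#if 0
static int example_scratch_write(void)
{
        char *buf;
        int ret = -EBUSY;

        preempt_disable_notrace();
        buf = get_trace_buf();
        if (buf) {
                /* up to TRACE_BUF_SIZE bytes of scratch space */
                snprintf(buf, TRACE_BUF_SIZE, "scratch");
                put_trace_buf();
                ret = 0;
        }
        preempt_enable_notrace();
        return ret;
}
#endif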
2394
2395static int alloc_percpu_trace_buffer(void)
2396{
2397 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002398
2399 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002400 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2401 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002402
2403 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002404 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002405}
2406
Steven Rostedt81698832012-10-11 10:15:05 -04002407static int buffers_allocated;
2408
Steven Rostedt07d777f2011-09-22 14:01:55 -04002409void trace_printk_init_buffers(void)
2410{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002411 if (buffers_allocated)
2412 return;
2413
2414 if (alloc_percpu_trace_buffer())
2415 return;
2416
Steven Rostedt2184db42014-05-28 13:14:40 -04002417 /* trace_printk() is for debug use only. Don't use it in production. */
2418
Joe Perchesa395d6a2016-03-22 14:28:09 -07002419 pr_warn("\n");
2420 pr_warn("**********************************************************\n");
2421 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2422 pr_warn("** **\n");
2423 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2424 pr_warn("** **\n");
2425 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2426 pr_warn("** unsafe for production use. **\n");
2427 pr_warn("** **\n");
2428 pr_warn("** If you see this message and you are not debugging **\n");
2429 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2430 pr_warn("** **\n");
2431 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2432 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002433
Steven Rostedtb382ede62012-10-10 21:44:34 -04002434 /* Expand the buffers to their set size */
2435 tracing_update_buffers();
2436
Steven Rostedt07d777f2011-09-22 14:01:55 -04002437 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002438
2439 /*
2440 * trace_printk_init_buffers() can be called by modules.
2441 * If that happens, then we need to start cmdline recording
 2442 * directly here. If global_trace.trace_buffer.buffer is already
 2443 * allocated, then this was called by module code.
2444 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002445 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002446 tracing_start_cmdline_record();
2447}
2448
2449void trace_printk_start_comm(void)
2450{
2451 /* Start tracing comms if trace printk is set */
2452 if (!buffers_allocated)
2453 return;
2454 tracing_start_cmdline_record();
2455}
2456
2457static void trace_printk_start_stop_comm(int enabled)
2458{
2459 if (!buffers_allocated)
2460 return;
2461
2462 if (enabled)
2463 tracing_start_cmdline_record();
2464 else
2465 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002466}
2467
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002468/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002469 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002470 *
2471 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002472int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002473{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002474 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002475 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002476 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002477 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002478 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002479 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002480 char *tbuffer;
2481 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002482
2483 if (unlikely(tracing_selftest_running || tracing_disabled))
2484 return 0;
2485
2486 /* Don't pollute graph traces with trace_vprintk internals */
2487 pause_graph_tracing();
2488
2489 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002490 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002491
Steven Rostedt07d777f2011-09-22 14:01:55 -04002492 tbuffer = get_trace_buf();
2493 if (!tbuffer) {
2494 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002495 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002496 }
2497
2498 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2499
2500 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002501 goto out;
2502
Steven Rostedt07d777f2011-09-22 14:01:55 -04002503 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002504 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002505 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002506 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2507 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002508 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002509 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002510 entry = ring_buffer_event_data(event);
2511 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002512 entry->fmt = fmt;
2513
Steven Rostedt07d777f2011-09-22 14:01:55 -04002514 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002515 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002516 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002517 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002518 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002519
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002520out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002521 put_trace_buf();
2522
2523out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002524 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002525 unpause_graph_tracing();
2526
2527 return len;
2528}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002529EXPORT_SYMBOL_GPL(trace_vbprintk);
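/*
 * Illustrative sketch, not part of the original file: trace_vbprintk()
 * is the backend of the trace_printk() macro, which is what debug code
 * normally calls; the format and arguments land in the ring buffer as
 * a compact binary bprint entry.
 */
#if 0
static void example_debug_event(int cpu, u64 delta)
{
        trace_printk("cpu %d stalled for %llu ns\n", cpu, delta);
}
#endif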
2530
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002531__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002532static int
2533__trace_array_vprintk(struct ring_buffer *buffer,
2534 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002535{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002536 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002537 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002538 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002539 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002540 unsigned long flags;
2541 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002542
2543 if (tracing_disabled || tracing_selftest_running)
2544 return 0;
2545
Steven Rostedt07d777f2011-09-22 14:01:55 -04002546 /* Don't pollute graph traces with trace_vprintk internals */
2547 pause_graph_tracing();
2548
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002549 pc = preempt_count();
2550 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002551
Steven Rostedt07d777f2011-09-22 14:01:55 -04002552
2553 tbuffer = get_trace_buf();
2554 if (!tbuffer) {
2555 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002556 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002557 }
2558
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002559 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002560
Steven Rostedt07d777f2011-09-22 14:01:55 -04002561 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002562 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002563 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002564 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002565 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002566 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002567 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002568 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002569
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002570 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002571 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002572 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002573 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002574 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002575
2576out:
2577 put_trace_buf();
2578
2579out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002580 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002581 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002582
2583 return len;
2584}
Steven Rostedt659372d2009-09-03 19:11:07 -04002585
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002586__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002587int trace_array_vprintk(struct trace_array *tr,
2588 unsigned long ip, const char *fmt, va_list args)
2589{
2590 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2591}
2592
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002593__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002594int trace_array_printk(struct trace_array *tr,
2595 unsigned long ip, const char *fmt, ...)
2596{
2597 int ret;
2598 va_list ap;
2599
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002601 return 0;
2602
2603 va_start(ap, fmt);
2604 ret = trace_array_vprintk(tr, ip, fmt, ap);
2605 va_end(ap);
2606 return ret;
2607}
2608
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002609__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002610int trace_array_printk_buf(struct ring_buffer *buffer,
2611 unsigned long ip, const char *fmt, ...)
2612{
2613 int ret;
2614 va_list ap;
2615
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002616 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002617 return 0;
2618
2619 va_start(ap, fmt);
2620 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2621 va_end(ap);
2622 return ret;
2623}
2624
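/* trace_vprintk() records into the global (top level) trace buffer. */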
Mathieu Malaterrebca139f2018-03-08 21:58:43 +01002625__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04002626int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2627{
Steven Rostedta813a152009-10-09 01:41:35 -04002628 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002629}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002630EXPORT_SYMBOL_GPL(trace_vprintk);
2631
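/*
 * Bump the iterator's virtual index and advance this CPU's ring
 * buffer iterator past the event that was just consumed.
 */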
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002632static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002633{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002634 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2635
Steven Rostedt5a90f572008-09-03 17:42:51 -04002636 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002637 if (buf_iter)
2638 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002639}
2640
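/*
 * Peek at the next entry on @cpu without consuming it. An iterator-based
 * reader (the "trace" file) peeks through its ring buffer iterator;
 * otherwise the live buffer is peeked (the "trace_pipe" path) and the
 * number of events lost since the last read is reported in @lost_events.
 */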
Ingo Molnare309b412008-05-12 21:20:51 +02002641static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002642peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2643 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002644{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002645 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002646 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002647
Steven Rostedtd7690412008-10-01 00:29:53 -04002648 if (buf_iter)
2649 event = ring_buffer_iter_peek(buf_iter, ts);
2650 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002651 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002652 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002653
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002654 if (event) {
2655 iter->ent_size = ring_buffer_event_length(event);
2656 return ring_buffer_event_data(event);
2657 }
2658 iter->ent_size = 0;
2659 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002660}
Steven Rostedtd7690412008-10-01 00:29:53 -04002661
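/*
 * Merge the per-cpu buffers into one time-ordered stream: peek at every
 * CPU (or just the one for a per-cpu file) and return the entry with
 * the earliest timestamp, along with its CPU, timestamp and lost-event
 * count.
 */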
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002662static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002663__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2664 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002665{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002666 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002667 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002668 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002669 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002670 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002671 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002672 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002673 int cpu;
2674
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002675 /*
2676	 * If we are in a per_cpu trace file, don't bother iterating over
2677	 * all the CPUs; peek at that CPU directly.
2678 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002679 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002680 if (ring_buffer_empty_cpu(buffer, cpu_file))
2681 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002682 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002683 if (ent_cpu)
2684 *ent_cpu = cpu_file;
2685
2686 return ent;
2687 }
2688
Steven Rostedtab464282008-05-12 21:21:00 +02002689 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002690
2691 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002692 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002693
Steven Rostedtbc21b472010-03-31 19:49:26 -04002694 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002695
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002696 /*
2697 * Pick the entry with the smallest timestamp:
2698 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002699 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002700 next = ent;
2701 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002702 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002703 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002704 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002705 }
2706 }
2707
Steven Rostedt12b5da32012-03-27 10:43:28 -04002708 iter->ent_size = next_size;
2709
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002710 if (ent_cpu)
2711 *ent_cpu = next_cpu;
2712
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002713 if (ent_ts)
2714 *ent_ts = next_ts;
2715
Steven Rostedtbc21b472010-03-31 19:49:26 -04002716 if (missing_events)
2717 *missing_events = next_lost;
2718
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002719 return next;
2720}
2721
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002722/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002723struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2724 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002725{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002726 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002727}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002728
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002729/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002730void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002731{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002732 iter->ent = __find_next_entry(iter, &iter->cpu,
2733 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002734
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002735 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002736 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002737
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002738 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002739}
2740
Ingo Molnare309b412008-05-12 21:20:51 +02002741static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002742{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002743 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002744 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002745}
2746
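/*
 * seq_file ->next() operation for the "trace" file. The walk is
 * forward-only: ring buffer iterators cannot rewind, so a position
 * before the current index simply ends the sequence.
 */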
Ingo Molnare309b412008-05-12 21:20:51 +02002747static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002748{
2749 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002750 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002751 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002752
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002753 WARN_ON_ONCE(iter->leftover);
2754
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002755 (*pos)++;
2756
2757 /* can't go backwards */
2758 if (iter->idx > i)
2759 return NULL;
2760
2761 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002762 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002763 else
2764 ent = iter;
2765
2766 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002767 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002768
2769 iter->pos = *pos;
2770
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002771 return ent;
2772}
2773
Jason Wessel955b61e2010-08-05 09:22:23 -05002774void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002775{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002776 struct ring_buffer_event *event;
2777 struct ring_buffer_iter *buf_iter;
2778 unsigned long entries = 0;
2779 u64 ts;
2780
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002781 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002782
Steven Rostedt6d158a82012-06-27 20:46:14 -04002783 buf_iter = trace_buffer_iter(iter, cpu);
2784 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002785 return;
2786
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002787 ring_buffer_iter_reset(buf_iter);
2788
2789 /*
2790	 * With the max latency tracers, it is possible that a reset never
2791	 * took place on a cpu. This shows up as timestamps that come
2792	 * before the start of the buffer.
2793 */
2794 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002795 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002796 break;
2797 entries++;
2798 ring_buffer_read(buf_iter, NULL);
2799 }
2800
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002801 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002802}
2803
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002804/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002805 * The current tracer is copied to avoid taking a global lock
2806 * across the whole read.
2807 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002808static void *s_start(struct seq_file *m, loff_t *pos)
2809{
2810 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002811 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002812 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002813 void *p = NULL;
2814 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002815 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002816
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002817 /*
2818	 * Copy the tracer to avoid using a global lock all around.
2819	 * iter->trace is a copy of current_trace, so the name pointer
2820	 * may be compared instead of using strcmp(), as iter->trace->name
2821	 * will point to the same string as current_trace->name.
2822 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002823 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002824 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2825 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002826 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002827
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002828#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002829 if (iter->snapshot && iter->trace->use_max_tr)
2830 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002831#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002832
2833 if (!iter->snapshot)
2834 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002835
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002836 if (*pos != iter->pos) {
2837 iter->ent = NULL;
2838 iter->cpu = 0;
2839 iter->idx = -1;
2840
Steven Rostedtae3b5092013-01-23 15:22:59 -05002841 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002842 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002843 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002844 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002845 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002846
Lai Jiangshanac91d852010-03-02 17:54:50 +08002847 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002848 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2849 ;
2850
2851 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002852 /*
2853 * If we overflowed the seq_file before, then we want
2854 * to just reuse the trace_seq buffer again.
2855 */
2856 if (iter->leftover)
2857 p = iter;
2858 else {
2859 l = *pos - 1;
2860 p = s_next(m, p, &l);
2861 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002862 }
2863
Lai Jiangshan4f535962009-05-18 19:35:34 +08002864 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002865 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002866 return p;
2867}
2868
2869static void s_stop(struct seq_file *m, void *p)
2870{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002871 struct trace_iterator *iter = m->private;
2872
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002873#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002874 if (iter->snapshot && iter->trace->use_max_tr)
2875 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002876#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002877
2878 if (!iter->snapshot)
2879 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002880
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002881 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002882 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002883}
2884
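/*
 * Sum the entries of every per-cpu buffer: @entries counts what can
 * still be read, while @total also includes events lost to overruns
 * (unless a latency reset skipped entries, in which case the two match).
 */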
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002885static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002886get_total_entries(struct trace_buffer *buf,
2887 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002888{
2889 unsigned long count;
2890 int cpu;
2891
2892 *total = 0;
2893 *entries = 0;
2894
2895 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002896 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002897 /*
2898	 * If this buffer has skipped entries, then we hold all
2899	 * entries for the trace, and we need to ignore the
2900	 * ones before the buffer's start time stamp.
2901 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002902 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2903 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002904 /* total is the same as the entries */
2905 *total += count;
2906 } else
2907 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002908 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002909 *entries += count;
2910 }
2911}
2912
Ingo Molnare309b412008-05-12 21:20:51 +02002913static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002914{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002915 seq_puts(m, "# _------=> CPU# \n"
2916 "# / _-----=> irqs-off \n"
2917 "# | / _----=> need-resched \n"
2918 "# || / _---=> hardirq/softirq \n"
2919 "# ||| / _--=> preempt-depth \n"
2920 "# |||| / delay \n"
2921 "# cmd pid ||||| time | caller \n"
2922 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923}
2924
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002925static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002926{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002927 unsigned long total;
2928 unsigned long entries;
2929
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002930 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002931 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2932 entries, total, num_online_cpus());
2933 seq_puts(m, "#\n");
2934}
2935
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002936static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002937{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002938 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002939 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2940 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002941}
2942
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002943static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002944{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002945 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002946 seq_puts(m, "# _-----=> irqs-off\n"
2947 "# / _----=> need-resched\n"
2948 "# | / _---=> hardirq/softirq\n"
2949 "# || / _--=> preempt-depth\n"
2950 "# ||| / delay\n"
2951 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2952 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002953}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002954
Jiri Olsa62b915f2010-04-02 19:01:22 +02002955void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002956print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2957{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002958 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002959 struct trace_buffer *buf = iter->trace_buffer;
2960 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002961 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002962 unsigned long entries;
2963 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002964	const char *name;
2965
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002966 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002967
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002968 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002969
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002970 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002971 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002972 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002973 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002974 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002975 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002976 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002977 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002978 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002979 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002980#if defined(CONFIG_PREEMPT_NONE)
2981 "server",
2982#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2983 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002984#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985 "preempt",
2986#else
2987 "unknown",
2988#endif
2989 /* These are reserved for later use */
2990 0, 0, 0, 0);
2991#ifdef CONFIG_SMP
2992 seq_printf(m, " #P:%d)\n", num_online_cpus());
2993#else
2994 seq_puts(m, ")\n");
2995#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002996 seq_puts(m, "# -----------------\n");
2997 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002998 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002999 data->comm, data->pid,
3000 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003001 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003002 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003
3004 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003005 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003006 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3007 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003008 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003009 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3010 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003011 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003012 }
3013
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003014 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003015}
3016
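/*
 * When buffers overran, annotate the point where each CPU's buffer
 * starts contributing entries, so the reader knows earlier events
 * from that CPU were dropped.
 */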
Steven Rostedta3097202008-11-07 22:36:02 -05003017static void test_cpu_buff_start(struct trace_iterator *iter)
3018{
3019 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003020 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003021
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003022 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003023 return;
3024
3025 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3026 return;
3027
Matthias Kaehlcke8bd71282017-04-21 16:41:10 -07003028 if (cpumask_available(iter->started) &&
3029 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003030 return;
3031
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003032 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003033 return;
3034
Matthias Kaehlcke8bd71282017-04-21 16:41:10 -07003035 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003036 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003037
3038	/* Don't print the "buffer started" banner for the first entry of the trace */
3039 if (iter->idx > 1)
3040 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3041 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003042}
3043
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003044static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003045{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003046 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003047 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003048 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003049 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003050 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003051
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003052 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003053
Steven Rostedta3097202008-11-07 22:36:02 -05003054 test_cpu_buff_start(iter);
3055
Steven Rostedtf633cef2008-12-23 23:24:13 -05003056 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003057
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003058 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003059 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3060 trace_print_lat_context(iter);
3061 else
3062 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003063 }
3064
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003065 if (trace_seq_has_overflowed(s))
3066 return TRACE_TYPE_PARTIAL_LINE;
3067
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003068 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003069 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003070
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003071 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003072
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003073 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003074}
3075
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003076static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003077{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003078 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003079 struct trace_seq *s = &iter->seq;
3080 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003081 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003082
3083 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003084
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003085 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003086 trace_seq_printf(s, "%d %d %llu ",
3087 entry->pid, iter->cpu, iter->ts);
3088
3089 if (trace_seq_has_overflowed(s))
3090 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003091
Steven Rostedtf633cef2008-12-23 23:24:13 -05003092 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003093 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003094 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003095
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003096 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003097
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003098 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003099}
3100
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003101static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003102{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003103 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003104 struct trace_seq *s = &iter->seq;
3105 unsigned char newline = '\n';
3106 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003107 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003108
3109 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003110
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003111 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003112 SEQ_PUT_HEX_FIELD(s, entry->pid);
3113 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3114 SEQ_PUT_HEX_FIELD(s, iter->ts);
3115 if (trace_seq_has_overflowed(s))
3116 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003117 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003118
Steven Rostedtf633cef2008-12-23 23:24:13 -05003119 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003120 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003121 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003122 if (ret != TRACE_TYPE_HANDLED)
3123 return ret;
3124 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003125
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003126 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003127
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003128 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003129}
3130
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003131static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003132{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003133 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003134 struct trace_seq *s = &iter->seq;
3135 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003136 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003137
3138 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003139
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003140 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003141 SEQ_PUT_FIELD(s, entry->pid);
3142 SEQ_PUT_FIELD(s, iter->cpu);
3143 SEQ_PUT_FIELD(s, iter->ts);
3144 if (trace_seq_has_overflowed(s))
3145 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003146 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003147
Steven Rostedtf633cef2008-12-23 23:24:13 -05003148 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003149 return event ? event->funcs->binary(iter, 0, event) :
3150 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003151}
3152
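/* Return 1 when there is nothing left to read in the iterator's buffers. */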
Jiri Olsa62b915f2010-04-02 19:01:22 +02003153int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003154{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003155 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156 int cpu;
3157
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003158 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003159 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003160 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003161 buf_iter = trace_buffer_iter(iter, cpu);
3162 if (buf_iter) {
3163 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003164 return 0;
3165 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003166 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003167 return 0;
3168 }
3169 return 1;
3170 }
3171
Steven Rostedtab464282008-05-12 21:21:00 +02003172 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003173 buf_iter = trace_buffer_iter(iter, cpu);
3174 if (buf_iter) {
3175 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003176 return 0;
3177 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003178 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003179 return 0;
3180 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003181 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003182
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003183 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003184}
3185
Lai Jiangshan4f535962009-05-18 19:35:34 +08003186/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003187enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003188{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003189 struct trace_array *tr = iter->tr;
3190 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003191 enum print_line_t ret;
3192
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003193 if (iter->lost_events) {
3194 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3195 iter->cpu, iter->lost_events);
3196 if (trace_seq_has_overflowed(&iter->seq))
3197 return TRACE_TYPE_PARTIAL_LINE;
3198 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003199
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003200 if (iter->trace && iter->trace->print_line) {
3201 ret = iter->trace->print_line(iter);
3202 if (ret != TRACE_TYPE_UNHANDLED)
3203 return ret;
3204 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003205
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003206 if (iter->ent->type == TRACE_BPUTS &&
3207 trace_flags & TRACE_ITER_PRINTK &&
3208 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3209 return trace_print_bputs_msg_only(iter);
3210
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003211 if (iter->ent->type == TRACE_BPRINT &&
3212 trace_flags & TRACE_ITER_PRINTK &&
3213 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003214 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003215
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003216 if (iter->ent->type == TRACE_PRINT &&
3217 trace_flags & TRACE_ITER_PRINTK &&
3218 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003219 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003220
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003221 if (trace_flags & TRACE_ITER_BIN)
3222 return print_bin_fmt(iter);
3223
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003224 if (trace_flags & TRACE_ITER_HEX)
3225 return print_hex_fmt(iter);
3226
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003227 if (trace_flags & TRACE_ITER_RAW)
3228 return print_raw_fmt(iter);
3229
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003230 return print_trace_fmt(iter);
3231}
3232
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003233void trace_latency_header(struct seq_file *m)
3234{
3235 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003236 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003237
3238 /* print nothing if the buffers are empty */
3239 if (trace_empty(iter))
3240 return;
3241
3242 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3243 print_trace_header(m, iter);
3244
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003245 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003246 print_lat_help_header(m);
3247}
3248
Jiri Olsa62b915f2010-04-02 19:01:22 +02003249void trace_default_header(struct seq_file *m)
3250{
3251 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003252 struct trace_array *tr = iter->tr;
3253 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003254
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003255 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3256 return;
3257
Jiri Olsa62b915f2010-04-02 19:01:22 +02003258 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3259 /* print nothing if the buffers are empty */
3260 if (trace_empty(iter))
3261 return;
3262 print_trace_header(m, iter);
3263 if (!(trace_flags & TRACE_ITER_VERBOSE))
3264 print_lat_help_header(m);
3265 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003266 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3267 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003268 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003269 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003270 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003271 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003272 }
3273}
3274
Steven Rostedte0a413f2011-09-29 21:26:16 -04003275static void test_ftrace_alive(struct seq_file *m)
3276{
3277 if (!ftrace_is_dead())
3278 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003279 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3280 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003281}
3282
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003283#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003284static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003285{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003286 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3287 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3288 "# Takes a snapshot of the main buffer.\n"
3289 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3290		    "# (Doesn't have to be '2'; works with any number that\n"
3291 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003292}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003293
3294static void show_snapshot_percpu_help(struct seq_file *m)
3295{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003296 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003297#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003298 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3299 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003300#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003301 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3302 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003303#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003304 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3305		    "# (Doesn't have to be '2'; works with any number that\n"
3306 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003307}
3308
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003309static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3310{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003311 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003312 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003313 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003314 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003315
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003316 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003317 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3318 show_snapshot_main_help(m);
3319 else
3320 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003321}
3322#else
3323/* Should never be called */
3324static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3325#endif
3326
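/*
 * seq_file ->show() operation: print the headers while there is no
 * current entry, flush a leftover line that overflowed the seq_file
 * buffer earlier, or render the current trace entry.
 */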
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327static int s_show(struct seq_file *m, void *v)
3328{
3329 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003330 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003331
3332 if (iter->ent == NULL) {
3333 if (iter->tr) {
3334 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3335 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003336 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003337 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003338 if (iter->snapshot && trace_empty(iter))
3339 print_snapshot_help(m, iter);
3340 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003341 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003342 else
3343 trace_default_header(m);
3344
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003345 } else if (iter->leftover) {
3346 /*
3347 * If we filled the seq_file buffer earlier, we
3348 * want to just show it now.
3349 */
3350 ret = trace_print_seq(m, &iter->seq);
3351
3352 /* ret should this time be zero, but you never know */
3353 iter->leftover = ret;
3354
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003355 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003356 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003357 ret = trace_print_seq(m, &iter->seq);
3358 /*
3359 * If we overflow the seq_file buffer, then it will
3360 * ask us for this data again at start up.
3361 * Use that instead.
3362 * ret is 0 if seq_file write succeeded.
3363 * -1 otherwise.
3364 */
3365 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003366 }
3367
3368 return 0;
3369}
3370
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003371/*
3372 * Should be used after trace_array_get(); trace_types_lock
3373 * ensures that i_cdev was already initialized.
3374 */
3375static inline int tracing_get_cpu(struct inode *inode)
3376{
3377 if (inode->i_cdev) /* See trace_create_cpu_file() */
3378 return (long)inode->i_cdev - 1;
3379 return RING_BUFFER_ALL_CPUS;
3380}
3381
James Morris88e9d342009-09-22 16:43:43 -07003382static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003383 .start = s_start,
3384 .next = s_next,
3385 .stop = s_stop,
3386 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003387};
3388
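/*
 * Set up an iterator for reading the "trace" file: copy the current
 * tracer, pick the main or max buffer, stop tracing unless this is the
 * snapshot file, and prepare a ring buffer iterator for each requested
 * CPU.
 */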
Ingo Molnare309b412008-05-12 21:20:51 +02003389static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003390__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003391{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003392 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003393 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003394 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003395
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003396 if (tracing_disabled)
3397 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003398
Jiri Olsa50e18b92012-04-25 10:23:39 +02003399 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003400 if (!iter)
3401 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003402
Gil Fruchter72917232015-06-09 10:32:35 +03003403 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003404 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003405 if (!iter->buffer_iter)
3406 goto release;
3407
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003408 /*
3409 * We make a copy of the current tracer to avoid concurrent
3410 * changes on it while we are reading.
3411 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003412 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003413 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003414 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003415 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003416
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003417 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003418
Li Zefan79f55992009-06-15 14:58:26 +08003419 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003420 goto fail;
3421
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003422 iter->tr = tr;
3423
3424#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003425 /* Currently only the top directory has a snapshot */
3426 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003427 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003428 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003429#endif
3430 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003431 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003432 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003433 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003434 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003436 /* Notify the tracer early; before we stop tracing. */
3437 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003438 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003439
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003440 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003441 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003442 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3443
David Sharp8be07092012-11-13 12:18:22 -08003444 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003445 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003446 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3447
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003448 /* stop the trace while dumping if we are not opening "snapshot" */
3449 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003450 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003451
Steven Rostedtae3b5092013-01-23 15:22:59 -05003452 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003453 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003454 iter->buffer_iter[cpu] =
Douglas Anderson3085d412019-03-08 11:32:04 -08003455 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3456 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07003457 }
3458 ring_buffer_read_prepare_sync();
3459 for_each_tracing_cpu(cpu) {
3460 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003461 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003462 }
3463 } else {
3464 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003465 iter->buffer_iter[cpu] =
Douglas Anderson3085d412019-03-08 11:32:04 -08003466 ring_buffer_read_prepare(iter->trace_buffer->buffer,
3467 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07003468 ring_buffer_read_prepare_sync();
3469 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003470 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003471 }
3472
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003473 mutex_unlock(&trace_types_lock);
3474
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003475 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003476
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003477 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003478 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003479 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003480 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003481release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003482 seq_release_private(inode, file);
3483 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003484}
3485
3486int tracing_open_generic(struct inode *inode, struct file *filp)
3487{
Steven Rostedt60a11772008-05-12 21:20:44 +02003488 if (tracing_disabled)
3489 return -ENODEV;
3490
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491 filp->private_data = inode->i_private;
3492 return 0;
3493}
3494
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003495bool tracing_is_disabled(void)
3496{
3497	return (tracing_disabled) ? true : false;
3498}
3499
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003500/*
3501 * Open and update trace_array ref count.
3502 * Must have the current trace_array passed to it.
3503 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003504static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003505{
3506 struct trace_array *tr = inode->i_private;
3507
3508 if (tracing_disabled)
3509 return -ENODEV;
3510
3511 if (trace_array_get(tr) < 0)
3512 return -ENODEV;
3513
3514 filp->private_data = inode->i_private;
3515
3516 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003517}
3518
Hannes Eder4fd27352009-02-10 19:44:12 +01003519static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003520{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003521 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003522 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003523 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003524 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003525
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003526 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003527 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003528 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003529 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003530
Oleg Nesterov6484c712013-07-23 17:26:10 +02003531 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003532 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003534
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003535 for_each_tracing_cpu(cpu) {
3536 if (iter->buffer_iter[cpu])
3537 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3538 }
3539
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540 if (iter->trace && iter->trace->close)
3541 iter->trace->close(iter);
3542
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003543 if (!iter->snapshot)
3544 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003545 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003546
3547 __trace_array_put(tr);
3548
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003549 mutex_unlock(&trace_types_lock);
3550
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003551 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003552 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003553 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003554 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003555 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003556
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003557 return 0;
3558}
3559
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003560static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3561{
3562 struct trace_array *tr = inode->i_private;
3563
3564 trace_array_put(tr);
3565 return 0;
3566}
3567
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003568static int tracing_single_release_tr(struct inode *inode, struct file *file)
3569{
3570 struct trace_array *tr = inode->i_private;
3571
3572 trace_array_put(tr);
3573
3574 return single_release(inode, file);
3575}
3576
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003577static int tracing_open(struct inode *inode, struct file *file)
3578{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003579 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003580 struct trace_iterator *iter;
3581 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003582
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003583 if (trace_array_get(tr) < 0)
3584 return -ENODEV;
3585
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003586	/* If this file was opened for write, then erase the contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003587 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3588 int cpu = tracing_get_cpu(inode);
Bo Yan5fb4be22017-09-18 10:03:35 -07003589 struct trace_buffer *trace_buf = &tr->trace_buffer;
3590
3591#ifdef CONFIG_TRACER_MAX_TRACE
3592 if (tr->current_trace->print_max)
3593 trace_buf = &tr->max_buffer;
3594#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02003595
3596 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan5fb4be22017-09-18 10:03:35 -07003597 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003598 else
Bo Yan5fb4be22017-09-18 10:03:35 -07003599 tracing_reset(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003600 }
3601
3602 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003603 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003604 if (IS_ERR(iter))
3605 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003606 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003607 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3608 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003609
3610 if (ret < 0)
3611 trace_array_put(tr);
3612
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003613 return ret;
3614}
3615
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003616/*
3617 * Some tracers are not suitable for instance buffers.
3618 * A tracer is always available for the global array (toplevel)
3619 * or if it explicitly states that it is.
3620 */
3621static bool
3622trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3623{
3624 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3625}
3626
3627/* Find the next tracer that this trace array may use */
3628static struct tracer *
3629get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3630{
3631 while (t && !trace_ok_for_array(t, tr))
3632 t = t->next;
3633
3634 return t;
3635}
3636
Ingo Molnare309b412008-05-12 21:20:51 +02003637static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003638t_next(struct seq_file *m, void *v, loff_t *pos)
3639{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003640 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003641 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003642
3643 (*pos)++;
3644
3645 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003646 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003647
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003648 return t;
3649}
3650
3651static void *t_start(struct seq_file *m, loff_t *pos)
3652{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003653 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003654 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003655 loff_t l = 0;
3656
3657 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003658
3659 t = get_tracer_for_array(tr, trace_types);
3660 for (; t && l < *pos; t = t_next(m, t, &l))
3661 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003662
3663 return t;
3664}
3665
3666static void t_stop(struct seq_file *m, void *p)
3667{
3668 mutex_unlock(&trace_types_lock);
3669}
3670
3671static int t_show(struct seq_file *m, void *v)
3672{
3673 struct tracer *t = v;
3674
3675 if (!t)
3676 return 0;
3677
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003678 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003679 if (t->next)
3680 seq_putc(m, ' ');
3681 else
3682 seq_putc(m, '\n');
3683
3684 return 0;
3685}
3686
James Morris88e9d342009-09-22 16:43:43 -07003687static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003688 .start = t_start,
3689 .next = t_next,
3690 .stop = t_stop,
3691 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003692};
3693
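/* The "available_tracers" file lists the tracers this instance may use. */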
3694static int show_traces_open(struct inode *inode, struct file *file)
3695{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003696 struct trace_array *tr = inode->i_private;
3697 struct seq_file *m;
3698 int ret;
3699
Steven Rostedt60a11772008-05-12 21:20:44 +02003700 if (tracing_disabled)
3701 return -ENODEV;
3702
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003703 ret = seq_open(file, &show_traces_seq_ops);
3704 if (ret)
3705 return ret;
3706
3707 m = file->private_data;
3708 m->private = tr;
3709
3710 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003711}
3712
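/*
 * Writes to the "trace" file are accepted but discarded; opening it
 * with O_TRUNC is what actually clears the buffer (see tracing_open()).
 */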
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003713static ssize_t
3714tracing_write_stub(struct file *filp, const char __user *ubuf,
3715 size_t count, loff_t *ppos)
3716{
3717 return count;
3718}
3719
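/*
 * Seeking is only meaningful for readers (seq_file based); a writer's
 * position is simply reset to zero.
 */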
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003720loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003721{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003722 int ret;
3723
Slava Pestov364829b2010-11-24 15:13:16 -08003724 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003725 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003726 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003727 file->f_pos = ret = 0;
3728
3729 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003730}
3731
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003732static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003733 .open = tracing_open,
3734 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003735 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003736 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003737 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003738};
3739
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003740static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003741 .open = show_traces_open,
3742 .read = seq_read,
3743 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003744 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003745};
3746
3747static ssize_t
3748tracing_cpumask_read(struct file *filp, char __user *ubuf,
3749 size_t count, loff_t *ppos)
3750{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003751 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Dud760f902017-11-30 11:39:43 +08003752 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003753 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003754
Changbin Dud760f902017-11-30 11:39:43 +08003755 len = snprintf(NULL, 0, "%*pb\n",
3756 cpumask_pr_args(tr->tracing_cpumask)) + 1;
3757 mask_str = kmalloc(len, GFP_KERNEL);
3758 if (!mask_str)
3759 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003760
Changbin Dud760f902017-11-30 11:39:43 +08003761 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08003762 cpumask_pr_args(tr->tracing_cpumask));
3763 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003764 count = -EINVAL;
3765 goto out_err;
3766 }
Changbin Dud760f902017-11-30 11:39:43 +08003767 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003768
3769out_err:
Changbin Dud760f902017-11-30 11:39:43 +08003770 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003771
3772 return count;
3773}
3774
3775static ssize_t
3776tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3777 size_t count, loff_t *ppos)
3778{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003779 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303780 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003781 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303782
3783 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3784 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003785
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303786 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003787 if (err)
3788 goto err_unlock;
3789
Steven Rostedta5e25882008-12-02 15:34:05 -05003790 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003791 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003792 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003793 /*
3794 * Increase/decrease the disabled counter if we are
3795 * about to flip a bit in the cpumask:
3796 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003797 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303798 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003799 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3800 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003801 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003802 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303803 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003804 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3805 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003806 }
3807 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003808 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003809 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003810
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003811 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303812 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003813
Ingo Molnarc7078de2008-05-12 21:20:52 +02003814 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003815
3816err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003817 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003818
3819 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003820}
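
/*
 * Example usage from user space (illustrative; the mount point may
 * vary): limit tracing to CPUs 0 and 1 by writing a hex mask.
 * cpumask_parse_user() accepts the same bitmask format that the
 * read side above prints with "%*pb".
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	3
 */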
3821
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003822static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003823 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003824 .read = tracing_cpumask_read,
3825 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003826 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003827 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003828};
3829
Li Zefanfdb372e2009-12-08 11:15:59 +08003830static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003831{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003832 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003833 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003834 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003835 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003836
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003837 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003838 tracer_flags = tr->current_trace->flags->val;
3839 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003840
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003841 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003842 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003843 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003844 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003845 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003846 }
3847
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003848 for (i = 0; trace_opts[i].name; i++) {
3849 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003850 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003851 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003852 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003853 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003854 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003855
Li Zefanfdb372e2009-12-08 11:15:59 +08003856 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003857}
3858
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003859static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003860 struct tracer_flags *tracer_flags,
3861 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003862{
Chunyu Hud39cdd22016-03-08 21:37:01 +08003863 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003864 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003865
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003866 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003867 if (ret)
3868 return ret;
3869
3870 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003871 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003872 else
Zhaolei77708412009-08-07 18:53:21 +08003873 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003874 return 0;
3875}
3876
Li Zefan8d18eaa2009-12-08 11:17:06 +08003877/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003878static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003879{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003880 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003881 struct tracer_flags *tracer_flags = trace->flags;
3882 struct tracer_opt *opts = NULL;
3883 int i;
3884
3885 for (i = 0; tracer_flags->opts[i].name; i++) {
3886 opts = &tracer_flags->opts[i];
3887
3888 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003889 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003890 }
3891
3892 return -EINVAL;
3893}
3894
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003895/* Some tracers require overwrite to stay enabled */
3896int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3897{
3898 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3899 return -1;
3900
3901 return 0;
3902}
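
/*
 * Sketch (hypothetical tracer code) of how a latency tracer can use
 * the helper above: its own ->flag_changed callback delegates here,
 * so the overwrite flag cannot be cleared while the tracer is enabled.
 *
 *	static int my_flag_changed(struct trace_array *tr, u32 mask, int set)
 *	{
 *		return trace_keep_overwrite(tr->current_trace, mask, set);
 *	}
 */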
3903
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003904int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003905{
3906 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003907 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003908 return 0;
3909
3910 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003911 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003912 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003913 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003914
3915 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003916 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003917 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003918 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003919
3920 if (mask == TRACE_ITER_RECORD_CMD)
3921 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003922
Steven Rostedtc37775d2016-04-13 16:59:18 -04003923 if (mask == TRACE_ITER_EVENT_FORK)
3924 trace_event_follow_fork(tr, enabled);
3925
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003926 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003927 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003928#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003929 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003930#endif
3931 }
Steven Rostedt81698832012-10-11 10:15:05 -04003932
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04003933 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04003934 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04003935 trace_printk_control(enabled);
3936 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003937
3938 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003939}
3940
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003941static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003942{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003943 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003944 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003945 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003946 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003947 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003948
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003949 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003950
Li Zefan8d18eaa2009-12-08 11:17:06 +08003951 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003952 neg = 1;
3953 cmp += 2;
3954 }
3955
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003956 mutex_lock(&trace_types_lock);
3957
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003958 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003959 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003960 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003961 break;
3962 }
3963 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003964
3965 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003966 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003967 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003968
3969 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003970
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003971 /*
3972 * If the first trailing whitespace is replaced with '\0' by strstrip,
3973 * turn it back into a space.
3974 */
3975 if (orig_len > strlen(option))
3976 option[strlen(option)] = ' ';
3977
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003978 return ret;
3979}
3980
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003981static void __init apply_trace_boot_options(void)
3982{
3983 char *buf = trace_boot_options_buf;
3984 char *option;
3985
3986 while (true) {
3987 option = strsep(&buf, ",");
3988
3989 if (!option)
3990 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003991
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05003992 if (*option)
3993 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003994
3995 /* Put back the comma to allow this to be called again */
3996 if (buf)
3997 *(buf - 1) = ',';
3998 }
3999}
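
/*
 * Boot-time example (assuming trace_boot_options_buf was filled from
 * the "trace_options=" kernel parameter): the comma-separated list is
 * split by strsep() above and each entry applied to the global trace
 * array, e.g.:
 *
 *	trace_options=noprint-parent,overwrite
 */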
4000
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004001static ssize_t
4002tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4003 size_t cnt, loff_t *ppos)
4004{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004005 struct seq_file *m = filp->private_data;
4006 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004007 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004008 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004009
4010 if (cnt >= sizeof(buf))
4011 return -EINVAL;
4012
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004013 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004014 return -EFAULT;
4015
Steven Rostedta8dd2172013-01-09 20:54:17 -05004016 buf[cnt] = 0;
4017
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004018 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004019 if (ret < 0)
4020 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004021
Jiri Olsacf8517c2009-10-23 19:36:16 -04004022 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004023
4024 return cnt;
4025}
4026
Li Zefanfdb372e2009-12-08 11:15:59 +08004027static int tracing_trace_options_open(struct inode *inode, struct file *file)
4028{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004029 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004030 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004031
Li Zefanfdb372e2009-12-08 11:15:59 +08004032 if (tracing_disabled)
4033 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004034
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004035 if (trace_array_get(tr) < 0)
4036 return -ENODEV;
4037
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004038 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4039 if (ret < 0)
4040 trace_array_put(tr);
4041
4042 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004043}
4044
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004045static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004046 .open = tracing_trace_options_open,
4047 .read = seq_read,
4048 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004049 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004050 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004051};
4052
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004053static const char readme_msg[] =
4054 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004055 "# echo 0 > tracing_on : quick way to disable tracing\n"
4056 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4057 " Important files:\n"
4058 " trace\t\t\t- The static contents of the buffer\n"
4059 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4060 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4061 " current_tracer\t- function and latency tracers\n"
4062 " available_tracers\t- list of configured tracers for current_tracer\n"
4063 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4064 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4065 " trace_clock\t\t-change the clock used to order events\n"
4066 " local: Per cpu clock but may not be synced across CPUs\n"
4067 " global: Synced across CPUs but slows tracing down.\n"
4068 " counter: Not a clock, but just an increment\n"
4069 " uptime: Jiffy counter from time of boot\n"
4070 " perf: Same clock that perf events use\n"
4071#ifdef CONFIG_X86_64
4072 " x86-tsc: TSC cycle counter\n"
4073#endif
4074 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4075 " tracing_cpumask\t- Limit which CPUs to trace\n"
4076 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4077 "\t\t\t Remove sub-buffer with rmdir\n"
4078 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004079 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4080 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004081 " saved_cmdlines_size\t- echo a number in here to set how many comm-pid pairs to store\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004082#ifdef CONFIG_DYNAMIC_FTRACE
4083 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004084 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4085 "\t\t\t functions\n"
4086 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4087 "\t modules: Can select a group via module\n"
4088 "\t Format: :mod:<module-name>\n"
4089 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4090 "\t triggers: a command to perform when function is hit\n"
4091 "\t Format: <function>:<trigger>[:count]\n"
4092 "\t trigger: traceon, traceoff\n"
4093 "\t\t enable_event:<system>:<event>\n"
4094 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004095#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004096 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004097#endif
4098#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004099 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004100#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004101 "\t\t dump\n"
4102 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004103 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4104 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4105 "\t The first one will disable tracing every time do_fault is hit\n"
4106 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4107 "\t The first time do trap is hit and it disables tracing, the\n"
4108 "\t counter will decrement to 2. If tracing is already disabled,\n"
4109 "\t the counter will not decrement. It only decrements when the\n"
4110 "\t trigger did work\n"
4111 "\t To remove trigger without count:\n"
4112 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4113 "\t To remove trigger with a count:\n"
4114 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004115 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004116 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4117 "\t modules: Can select a group via module command :mod:\n"
4118 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004119#endif /* CONFIG_DYNAMIC_FTRACE */
4120#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004121 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4122 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004123#endif
4124#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4125 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004126 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004127 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4128#endif
4129#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004130 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4131 "\t\t\t snapshot buffer. Read the contents for more\n"
4132 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004133#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004134#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004135 " stack_trace\t\t- Shows the max stack trace when active\n"
4136 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004137 "\t\t\t Write into this file to reset the max size (trigger a\n"
4138 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004139#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004140 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4141 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004142#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004143#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004144#ifdef CONFIG_KPROBE_EVENT
4145 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4146 "\t\t\t Write into this file to define/undefine new trace events.\n"
4147#endif
4148#ifdef CONFIG_UPROBE_EVENT
4149 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4150 "\t\t\t Write into this file to define/undefine new trace events.\n"
4151#endif
4152#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4153 "\t accepts: event-definitions (one definition per line)\n"
4154 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4155 "\t -:[<group>/]<event>\n"
4156#ifdef CONFIG_KPROBE_EVENT
4157 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4158#endif
4159#ifdef CONFIG_UPROBE_EVENT
4160 "\t place: <path>:<offset>\n"
4161#endif
4162 "\t args: <name>=fetcharg[:type]\n"
4163 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4164 "\t $stack<index>, $stack, $retval, $comm\n"
4165 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4166 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4167#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004168 " events/\t\t- Directory containing all trace event subsystems:\n"
4169 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4170 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004171 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4172 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004173 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004174 " events/<system>/<event>/\t- Directory containing control files for\n"
4175 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004176 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4177 " filter\t\t- If set, only events passing filter are traced\n"
4178 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004179 "\t Format: <trigger>[:count][if <filter>]\n"
4180 "\t trigger: traceon, traceoff\n"
4181 "\t enable_event:<system>:<event>\n"
4182 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004183#ifdef CONFIG_HIST_TRIGGERS
4184 "\t enable_hist:<system>:<event>\n"
4185 "\t disable_hist:<system>:<event>\n"
4186#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004187#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004188 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004189#endif
4190#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004191 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004192#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004193#ifdef CONFIG_HIST_TRIGGERS
4194 "\t\t hist (see below)\n"
4195#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004196 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4197 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4198 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4199 "\t events/block/block_unplug/trigger\n"
4200 "\t The first disables tracing every time block_unplug is hit.\n"
4201 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4202 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4203 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4204 "\t Like function triggers, the counter is only decremented if it\n"
4205 "\t enabled or disabled tracing.\n"
4206 "\t To remove a trigger without a count:\n"
4207 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4208 "\t To remove a trigger with a count:\n"
4209 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4210 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004211#ifdef CONFIG_HIST_TRIGGERS
4212 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004213 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004214 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004215 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004216 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004217 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004218 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004219 "\t [if <filter>]\n\n"
4220 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004221 "\t table using the key(s) and value(s) named, and the value of a\n"
4222 "\t sum called 'hitcount' is incremented. Keys and values\n"
4223 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004224 "\t can be any field, or the special string 'stacktrace'.\n"
4225 "\t Compound keys consisting of up to two fields can be specified\n"
4226 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4227 "\t fields. Sort keys consisting of up to two fields can be\n"
4228 "\t specified using the 'sort' keyword. The sort direction can\n"
4229 "\t be modified by appending '.descending' or '.ascending' to a\n"
4230 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004231 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4232 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4233 "\t its histogram data will be shared with other triggers of the\n"
4234 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004235 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004236 "\t table in its entirety to stdout. If there are multiple hist\n"
4237 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004238 "\t trigger in the output. The table displayed for a named\n"
4239 "\t trigger will be the same as any other instance having the\n"
4240 "\t same name. The default format used to display a given field\n"
4241 "\t can be modified by appending any of the following modifiers\n"
4242 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004243 "\t .hex display a number as a hex value\n"
4244 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004245 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004246 "\t .execname display a common_pid as a program name\n"
4247 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004248 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004249 "\t The 'pause' parameter can be used to pause an existing hist\n"
4250 "\t trigger or to start a hist trigger but not log any events\n"
4251 "\t until told to do so. 'continue' can be used to start or\n"
4252 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004253 "\t The 'clear' parameter will clear the contents of a running\n"
4254 "\t hist trigger and leave its current paused/active state\n"
4255 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004256 "\t The enable_hist and disable_hist triggers can be used to\n"
4257 "\t have one event conditionally start and stop another event's\n"
4258 "\t already-attached hist trigger. The syntax is analagous to\n"
4259 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004260#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004261;
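
/*
 * Worked example of the hist trigger syntax documented above (a
 * sketch; field names assume the kmem:kmalloc event):
 *
 *	# echo 'hist:keys=common_pid.execname:vals=bytes_req:sort=bytes_req.descending' \
 *		> events/kmem/kmalloc/trigger
 *	# cat events/kmem/kmalloc/hist
 */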
4262
4263static ssize_t
4264tracing_readme_read(struct file *filp, char __user *ubuf,
4265 size_t cnt, loff_t *ppos)
4266{
4267 return simple_read_from_buffer(ubuf, cnt, ppos,
4268 readme_msg, strlen(readme_msg));
4269}
4270
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004271static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004272 .open = tracing_open_generic,
4273 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004274 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004275};
4276
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004277static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004278{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004279 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004280
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004281 if (*pos || m->count)
4282 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004283
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004284 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004285
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004286 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4287 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004288 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004289 continue;
4290
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004291 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004292 }
4293
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004294 return NULL;
4295}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004296
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004297static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4298{
4299 void *v;
4300 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004301
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004302 preempt_disable();
4303 arch_spin_lock(&trace_cmdline_lock);
4304
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004305 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004306 while (l <= *pos) {
4307 v = saved_cmdlines_next(m, v, &l);
4308 if (!v)
4309 return NULL;
4310 }
4311
4312 return v;
4313}
4314
4315static void saved_cmdlines_stop(struct seq_file *m, void *v)
4316{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004317 arch_spin_unlock(&trace_cmdline_lock);
4318 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004319}
4320
4321static int saved_cmdlines_show(struct seq_file *m, void *v)
4322{
4323 char buf[TASK_COMM_LEN];
4324 unsigned int *pid = v;
4325
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004326 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004327 seq_printf(m, "%d %s\n", *pid, buf);
4328 return 0;
4329}
4330
4331static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4332 .start = saved_cmdlines_start,
4333 .next = saved_cmdlines_next,
4334 .stop = saved_cmdlines_stop,
4335 .show = saved_cmdlines_show,
4336};
4337
4338static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4339{
4340 if (tracing_disabled)
4341 return -ENODEV;
4342
4343 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004344}
4345
4346static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004347 .open = tracing_saved_cmdlines_open,
4348 .read = seq_read,
4349 .llseek = seq_lseek,
4350 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004351};
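
/*
 * Reading the file dumps one "<pid> <comm>" pair per line via
 * saved_cmdlines_show() (output illustrative):
 *
 *	# cat saved_cmdlines
 *	1 systemd
 *	873 sshd
 */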
4352
4353static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004354tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4355 size_t cnt, loff_t *ppos)
4356{
4357 char buf[64];
4358 int r;
4359
4360 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004361 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004362 arch_spin_unlock(&trace_cmdline_lock);
4363
4364 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4365}
4366
4367static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4368{
4369 kfree(s->saved_cmdlines);
4370 kfree(s->map_cmdline_to_pid);
4371 kfree(s);
4372}
4373
4374static int tracing_resize_saved_cmdlines(unsigned int val)
4375{
4376 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4377
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004378 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004379 if (!s)
4380 return -ENOMEM;
4381
4382 if (allocate_cmdlines_buffer(val, s) < 0) {
4383 kfree(s);
4384 return -ENOMEM;
4385 }
4386
4387 arch_spin_lock(&trace_cmdline_lock);
4388 savedcmd_temp = savedcmd;
4389 savedcmd = s;
4390 arch_spin_unlock(&trace_cmdline_lock);
4391 free_saved_cmdlines_buffer(savedcmd_temp);
4392
4393 return 0;
4394}
4395
4396static ssize_t
4397tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4398 size_t cnt, loff_t *ppos)
4399{
4400 unsigned long val;
4401 int ret;
4402
4403 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4404 if (ret)
4405 return ret;
4406
4407 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4408 if (!val || val > PID_MAX_DEFAULT)
4409 return -EINVAL;
4410
4411 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4412 if (ret < 0)
4413 return ret;
4414
4415 *ppos += cnt;
4416
4417 return cnt;
4418}
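
/*
 * Example resize from user space (illustrative): the write path above
 * accepts 1..PID_MAX_DEFAULT, so this grows the map to 2048 entries.
 *
 *	# echo 2048 > saved_cmdlines_size
 *	# cat saved_cmdlines_size
 *	2048
 */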
4419
4420static const struct file_operations tracing_saved_cmdlines_size_fops = {
4421 .open = tracing_open_generic,
4422 .read = tracing_saved_cmdlines_size_read,
4423 .write = tracing_saved_cmdlines_size_write,
4424};
4425
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004426#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4427static union trace_enum_map_item *
4428update_enum_map(union trace_enum_map_item *ptr)
4429{
4430 if (!ptr->map.enum_string) {
4431 if (ptr->tail.next) {
4432 ptr = ptr->tail.next;
4433 /* Set ptr to the next real item (skip head) */
4434 ptr++;
4435 } else
4436 return NULL;
4437 }
4438 return ptr;
4439}
4440
4441static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4442{
4443 union trace_enum_map_item *ptr = v;
4444
4445 /*
4446 * Paranoid! If ptr points to end, we don't want to increment past it.
4447 * This really should never happen.
4448 */
4449 ptr = update_enum_map(ptr);
4450 if (WARN_ON_ONCE(!ptr))
4451 return NULL;
4452
4453 ptr++;
4454
4455 (*pos)++;
4456
4457 ptr = update_enum_map(ptr);
4458
4459 return ptr;
4460}
4461
4462static void *enum_map_start(struct seq_file *m, loff_t *pos)
4463{
4464 union trace_enum_map_item *v;
4465 loff_t l = 0;
4466
4467 mutex_lock(&trace_enum_mutex);
4468
4469 v = trace_enum_maps;
4470 if (v)
4471 v++;
4472
4473 while (v && l < *pos) {
4474 v = enum_map_next(m, v, &l);
4475 }
4476
4477 return v;
4478}
4479
4480static void enum_map_stop(struct seq_file *m, void *v)
4481{
4482 mutex_unlock(&trace_enum_mutex);
4483}
4484
4485static int enum_map_show(struct seq_file *m, void *v)
4486{
4487 union trace_enum_map_item *ptr = v;
4488
4489 seq_printf(m, "%s %ld (%s)\n",
4490 ptr->map.enum_string, ptr->map.enum_value,
4491 ptr->map.system);
4492
4493 return 0;
4494}
4495
4496static const struct seq_operations tracing_enum_map_seq_ops = {
4497 .start = enum_map_start,
4498 .next = enum_map_next,
4499 .stop = enum_map_stop,
4500 .show = enum_map_show,
4501};
4502
4503static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4504{
4505 if (tracing_disabled)
4506 return -ENODEV;
4507
4508 return seq_open(filp, &tracing_enum_map_seq_ops);
4509}
4510
4511static const struct file_operations tracing_enum_map_fops = {
4512 .open = tracing_enum_map_open,
4513 .read = seq_read,
4514 .llseek = seq_lseek,
4515 .release = seq_release,
4516};
4517
4518static inline union trace_enum_map_item *
4519trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4520{
4521 /* Return tail of array given the head */
4522 return ptr + ptr->head.length + 1;
4523}
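
/*
 * Layout sketch of one map_array allocation (here with len == 3),
 * matching the head/tail description in trace_insert_enum_map_file()
 * below:
 *
 *	[ head | map 0 | map 1 | map 2 | tail ]
 *	  mod,len                       next --> next map_array (or NULL)
 *
 * trace_enum_jmp_to_tail() steps from the head over head.length real
 * entries to land on the tail item.
 */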
4524
4525static void
4526trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4527 int len)
4528{
4529 struct trace_enum_map **stop;
4530 struct trace_enum_map **map;
4531 union trace_enum_map_item *map_array;
4532 union trace_enum_map_item *ptr;
4533
4534 stop = start + len;
4535
4536 /*
 4537 * The trace_enum_maps list contains the maps plus a head and tail item,
 4538 * where the head holds the module and the length of the array, and the
 4539 * tail holds a pointer to the next list.
4540 */
4541 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4542 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004543 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004544 return;
4545 }
4546
4547 mutex_lock(&trace_enum_mutex);
4548
4549 if (!trace_enum_maps)
4550 trace_enum_maps = map_array;
4551 else {
4552 ptr = trace_enum_maps;
4553 for (;;) {
4554 ptr = trace_enum_jmp_to_tail(ptr);
4555 if (!ptr->tail.next)
4556 break;
4557 ptr = ptr->tail.next;
4558
4559 }
4560 ptr->tail.next = map_array;
4561 }
4562 map_array->head.mod = mod;
4563 map_array->head.length = len;
4564 map_array++;
4565
4566 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4567 map_array->map = **map;
4568 map_array++;
4569 }
4570 memset(map_array, 0, sizeof(*map_array));
4571
4572 mutex_unlock(&trace_enum_mutex);
4573}
4574
4575static void trace_create_enum_file(struct dentry *d_tracer)
4576{
4577 trace_create_file("enum_map", 0444, d_tracer,
4578 NULL, &tracing_enum_map_fops);
4579}
4580
4581#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4582static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4583static inline void trace_insert_enum_map_file(struct module *mod,
4584 struct trace_enum_map **start, int len) { }
4585#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4586
4587static void trace_insert_enum_map(struct module *mod,
4588 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004589{
4590 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004591
4592 if (len <= 0)
4593 return;
4594
4595 map = start;
4596
4597 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004598
4599 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004600}
4601
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004602static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004603tracing_set_trace_read(struct file *filp, char __user *ubuf,
4604 size_t cnt, loff_t *ppos)
4605{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004606 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004607 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004608 int r;
4609
4610 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004611 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004612 mutex_unlock(&trace_types_lock);
4613
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004614 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004615}
4616
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004617int tracer_init(struct tracer *t, struct trace_array *tr)
4618{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004619 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004620 return t->init(tr);
4621}
4622
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004623static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004624{
4625 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004626
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004627 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004628 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004629}
4630
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004631#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004632/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004633static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4634 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004635{
4636 int cpu, ret = 0;
4637
4638 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4639 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004640 ret = ring_buffer_resize(trace_buf->buffer,
4641 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004642 if (ret < 0)
4643 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004644 per_cpu_ptr(trace_buf->data, cpu)->entries =
4645 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004646 }
4647 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004648 ret = ring_buffer_resize(trace_buf->buffer,
4649 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004650 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004651 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4652 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004653 }
4654
4655 return ret;
4656}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004657#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004658
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004659static int __tracing_resize_ring_buffer(struct trace_array *tr,
4660 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004661{
4662 int ret;
4663
4664 /*
4665 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004666 * we use the size that was given, and we can forget about
4667 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004668 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004669 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004670
Steven Rostedtb382ede62012-10-10 21:44:34 -04004671 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004672 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004673 return 0;
4674
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004675 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004676 if (ret < 0)
4677 return ret;
4678
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004679#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004680 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4681 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004682 goto out;
4683
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004684 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004685 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004686 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4687 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004688 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004689 /*
4690 * AARGH! We are left with different
4691 * size max buffer!!!!
4692 * The max buffer is our "snapshot" buffer.
4693 * When a tracer needs a snapshot (one of the
4694 * latency tracers), it swaps the max buffer
 4695 * with the saved snapshot. We succeeded in
 4696 * updating the size of the main buffer, but failed to
4697 * update the size of the max buffer. But when we tried
4698 * to reset the main buffer to the original size, we
4699 * failed there too. This is very unlikely to
4700 * happen, but if it does, warn and kill all
4701 * tracing.
4702 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004703 WARN_ON(1);
4704 tracing_disabled = 1;
4705 }
4706 return ret;
4707 }
4708
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004709 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004710 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004711 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004712 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004713
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004714 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004715#endif /* CONFIG_TRACER_MAX_TRACE */
4716
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004717 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004718 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004719 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004720 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004721
4722 return ret;
4723}
4724
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004725static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4726 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004727{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004728 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004729
4730 mutex_lock(&trace_types_lock);
4731
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004732 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4733 /* make sure, this cpu is enabled in the mask */
4734 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4735 ret = -EINVAL;
4736 goto out;
4737 }
4738 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004739
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004740 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004741 if (ret < 0)
4742 ret = -ENOMEM;
4743
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004744out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004745 mutex_unlock(&trace_types_lock);
4746
4747 return ret;
4748}
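
/*
 * User-space view of this resize path (illustrative values): writing
 * buffer_size_kb resizes every CPU (RING_BUFFER_ALL_CPUS), while the
 * per_cpu files resize a single CPU:
 *
 *	# echo 4096 > buffer_size_kb
 *	# echo 1024 > per_cpu/cpu1/buffer_size_kb
 */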
4749
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004750
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004751/**
4752 * tracing_update_buffers - used by tracing facility to expand ring buffers
4753 *
 4754 * To save memory when tracing is never used on a system that has it
 4755 * configured in, the ring buffers are set to a minimum size. Once a
 4756 * user starts to use the tracing facility, they need to grow to
 4757 * their default size.
4758 *
4759 * This function is to be called when a tracer is about to be used.
4760 */
4761int tracing_update_buffers(void)
4762{
4763 int ret = 0;
4764
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004765 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004766 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004767 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004768 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004769 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004770
4771 return ret;
4772}
4773
Steven Rostedt577b7852009-02-26 23:43:05 -05004774struct trace_option_dentry;
4775
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004776static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004777create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004778
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004779/*
4780 * Used to clear out the tracer before deletion of an instance.
4781 * Must have trace_types_lock held.
4782 */
4783static void tracing_set_nop(struct trace_array *tr)
4784{
4785 if (tr->current_trace == &nop_trace)
4786 return;
4787
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004788 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004789
4790 if (tr->current_trace->reset)
4791 tr->current_trace->reset(tr);
4792
4793 tr->current_trace = &nop_trace;
4794}
4795
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004796static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004797{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004798 /* Only enable if the directory has been created already. */
4799 if (!tr->dir)
4800 return;
4801
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004802 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004803}
4804
4805static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4806{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004807 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004808#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004809 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004810#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004811 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004812
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004813 mutex_lock(&trace_types_lock);
4814
Steven Rostedt73c51622009-03-11 13:42:01 -04004815 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004816 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004817 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004818 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004819 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004820 ret = 0;
4821 }
4822
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004823 for (t = trace_types; t; t = t->next) {
4824 if (strcmp(t->name, buf) == 0)
4825 break;
4826 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004827 if (!t) {
4828 ret = -EINVAL;
4829 goto out;
4830 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004831 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004832 goto out;
4833
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004834 /* Some tracers are only allowed for the top level buffer */
4835 if (!trace_ok_for_array(t, tr)) {
4836 ret = -EINVAL;
4837 goto out;
4838 }
4839
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004840 /* If trace pipe files are being read, we can't change the tracer */
4841 if (tr->current_trace->ref) {
4842 ret = -EBUSY;
4843 goto out;
4844 }
4845
Steven Rostedt9f029e82008-11-12 15:24:24 -05004846 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004847
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004848 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004849
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004850 if (tr->current_trace->reset)
4851 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004852
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004853 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004854 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004855
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004856#ifdef CONFIG_TRACER_MAX_TRACE
4857 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004858
4859 if (had_max_tr && !t->use_max_tr) {
4860 /*
4861 * We need to make sure that the update_max_tr sees that
4862 * current_trace changed to nop_trace to keep it from
4863 * swapping the buffers after we resize it.
4864 * The update_max_tr is called from interrupts disabled
4865 * so a synchronized_sched() is sufficient.
4866 */
4867 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004868 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004869 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004870#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004871
4872#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004873 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004874 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004875 if (ret < 0)
4876 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004877 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004878#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004879
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004880 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004881 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004882 if (ret)
4883 goto out;
4884 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004885
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004886 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004887 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004888 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004889 out:
4890 mutex_unlock(&trace_types_lock);
4891
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004892 return ret;
4893}
4894
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004895static ssize_t
4896tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4897 size_t cnt, loff_t *ppos)
4898{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004899 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004900 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004901 int i;
4902 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004903 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004904
Steven Rostedt60063a62008-10-28 10:44:24 -04004905 ret = cnt;
4906
Li Zefanee6c2c12009-09-18 14:06:47 +08004907 if (cnt > MAX_TRACER_SIZE)
4908 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004909
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004910 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004911 return -EFAULT;
4912
4913 buf[cnt] = 0;
4914
4915 /* strip ending whitespace. */
4916 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4917 buf[i] = 0;
4918
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004919 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004920 if (err)
4921 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004922
Jiri Olsacf8517c2009-10-23 19:36:16 -04004923 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004924
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004925 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004926}
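
/*
 * Illustrative userspace sketch (not part of this file): switching the
 * current tracer by writing its name into the "current_tracer" tracefs
 * file, which lands in tracing_set_trace_write() above.  The mount point
 * /sys/kernel/tracing is an assumption; older setups use
 * /sys/kernel/debug/tracing.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_current_tracer(const char *name)
{
	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);

	if (fd < 0)
		return -1;
	/* A trailing newline is fine; the write handler strips it. */
	if (write(fd, name, strlen(name)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif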
4927
4928static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004929tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4930 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004931{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004932 char buf[64];
4933 int r;
4934
Steven Rostedtcffae432008-05-12 21:21:00 +02004935 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004936 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004937 if (r > sizeof(buf))
4938 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004939 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004940}
4941
4942static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004943tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4944 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004945{
Hannes Eder5e398412009-02-10 19:44:34 +01004946 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004947 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004948
Peter Huewe22fe9b52011-06-07 21:58:27 +02004949 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4950 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004951 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004952
4953 *ptr = val * 1000;
4954
4955 return cnt;
4956}
4957
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004958static ssize_t
4959tracing_thresh_read(struct file *filp, char __user *ubuf,
4960 size_t cnt, loff_t *ppos)
4961{
4962 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4963}
4964
4965static ssize_t
4966tracing_thresh_write(struct file *filp, const char __user *ubuf,
4967 size_t cnt, loff_t *ppos)
4968{
4969 struct trace_array *tr = filp->private_data;
4970 int ret;
4971
4972 mutex_lock(&trace_types_lock);
4973 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4974 if (ret < 0)
4975 goto out;
4976
4977 if (tr->current_trace->update_thresh) {
4978 ret = tr->current_trace->update_thresh(tr);
4979 if (ret < 0)
4980 goto out;
4981 }
4982
4983 ret = cnt;
4984out:
4985 mutex_unlock(&trace_types_lock);
4986
4987 return ret;
4988}
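
/*
 * Units note: tracing_nsecs_read()/tracing_nsecs_write() above expose
 * nanosecond-resolution variables in microseconds -- reads print
 * nsecs_to_usecs(*ptr) and writes store val * 1000.  So writing "100"
 * to tracing_thresh requests a 100 usec threshold, stored as 100000
 * nsecs.  An illustrative sketch (not part of this file; path assumed):
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <unistd.h>

static void set_thresh_100us(void)
{
	int fd = open("/sys/kernel/tracing/tracing_thresh", O_WRONLY);

	if (fd >= 0) {
		write(fd, "100", 3);	/* microseconds at this boundary */
		close(fd);
	}
}
#endif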
4989
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04004990#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08004991
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004992static ssize_t
4993tracing_max_lat_read(struct file *filp, char __user *ubuf,
4994 size_t cnt, loff_t *ppos)
4995{
4996 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4997}
4998
4999static ssize_t
5000tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5001 size_t cnt, loff_t *ppos)
5002{
5003 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5004}
5005
Chen Gange428abb2015-11-10 05:15:15 +08005006#endif
5007
Steven Rostedtb3806b42008-05-12 21:20:46 +02005008static int tracing_open_pipe(struct inode *inode, struct file *filp)
5009{
Oleg Nesterov15544202013-07-23 17:25:57 +02005010 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005011 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005012 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005013
5014 if (tracing_disabled)
5015 return -ENODEV;
5016
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005017 if (trace_array_get(tr) < 0)
5018 return -ENODEV;
5019
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005020 mutex_lock(&trace_types_lock);
5021
Steven Rostedtb3806b42008-05-12 21:20:46 +02005022 /* create a buffer to store the information to pass to userspace */
5023 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005024 if (!iter) {
5025 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005026 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005027 goto out;
5028 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005029
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005030 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005031 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005032
5033 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5034 ret = -ENOMEM;
5035 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305036 }
5037
Steven Rostedta3097202008-11-07 22:36:02 -05005038 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305039 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005040
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005041 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005042 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5043
David Sharp8be07092012-11-13 12:18:22 -08005044 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005045 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005046 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5047
Oleg Nesterov15544202013-07-23 17:25:57 +02005048 iter->tr = tr;
5049 iter->trace_buffer = &tr->trace_buffer;
5050 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005051 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005052 filp->private_data = iter;
5053
Steven Rostedt107bad82008-05-12 21:21:01 +02005054 if (iter->trace->pipe_open)
5055 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005056
Arnd Bergmannb4447862010-07-07 23:40:11 +02005057 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005058
5059 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005060out:
5061 mutex_unlock(&trace_types_lock);
5062 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005063
5064fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005065 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005066 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005067 mutex_unlock(&trace_types_lock);
5068 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005069}
5070
5071static int tracing_release_pipe(struct inode *inode, struct file *file)
5072{
5073 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005074 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005075
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005076 mutex_lock(&trace_types_lock);
5077
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005078 tr->current_trace->ref--;
5079
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005080 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005081 iter->trace->pipe_close(iter);
5082
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005083 mutex_unlock(&trace_types_lock);
5084
Rusty Russell44623442009-01-01 10:12:23 +10305085 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005086 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005087 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005088
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005089 trace_array_put(tr);
5090
Steven Rostedtb3806b42008-05-12 21:20:46 +02005091 return 0;
5092}
5093
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005094static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005095trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005096{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005097 struct trace_array *tr = iter->tr;
5098
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005099 /* Iterators are static, they should be filled or empty */
5100 if (trace_buffer_iter(iter, iter->cpu_file))
5101 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005102
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005103 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005104 /*
5105 * Always select as readable when in blocking mode
5106 */
5107 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005108 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005109 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005110 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005111}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005112
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005113static unsigned int
5114tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5115{
5116 struct trace_iterator *iter = filp->private_data;
5117
5118 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005119}
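
/*
 * Illustrative sketch (not part of this file): waiting for trace data
 * with poll(2) on an open trace_pipe descriptor.  A POLLIN result maps
 * to the POLLIN | POLLRDNORM returned by trace_poll() above.
 */
#if 0 /* usage sketch only */
#include <poll.h>

static int wait_for_trace_data(int trace_pipe_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = trace_pipe_fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* > 0 means readable */
}
#endif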
5120
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005121/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005122static int tracing_wait_pipe(struct file *filp)
5123{
5124 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005125 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005126
5127 while (trace_empty(iter)) {
5128
 5129		if (filp->f_flags & O_NONBLOCK)
 5130			return -EAGAIN;
5132
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005133 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005134		 * We block until we have read something and tracing is disabled;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005135		 * that is, we still block if tracing is disabled but we have never
5136 * read anything. This allows a user to cat this file, and
5137 * then enable tracing. But after we have read something,
5138 * we give an EOF when tracing is again disabled.
5139 *
5140 * iter->pos will be 0 if we haven't read anything.
5141 */
Tahsin Erdogan97d402e2017-09-17 03:23:48 -07005142 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005143 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005144
5145 mutex_unlock(&iter->mutex);
5146
Rabin Vincente30f53a2014-11-10 19:46:34 +01005147 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005148
5149 mutex_lock(&iter->mutex);
5150
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005151 if (ret)
5152 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005153 }
5154
5155 return 1;
5156}
5157
Steven Rostedtb3806b42008-05-12 21:20:46 +02005158/*
5159 * Consumer reader.
5160 */
5161static ssize_t
5162tracing_read_pipe(struct file *filp, char __user *ubuf,
5163 size_t cnt, loff_t *ppos)
5164{
5165 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005166 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005167
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005168 /*
 5169	 * Avoid more than one consumer on a single file descriptor.
 5170	 * This is just a matter of trace coherency; the ring buffer itself
 5171	 * is protected.
5172 */
5173 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005174
5175 /* return any leftover data */
5176 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5177 if (sret != -EBUSY)
5178 goto out;
5179
5180 trace_seq_init(&iter->seq);
5181
Steven Rostedt107bad82008-05-12 21:21:01 +02005182 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005183 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5184 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005185 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005186 }
5187
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005188waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005189 sret = tracing_wait_pipe(filp);
5190 if (sret <= 0)
5191 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005192
5193 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005194 if (trace_empty(iter)) {
5195 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005196 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005197 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005198
5199 if (cnt >= PAGE_SIZE)
5200 cnt = PAGE_SIZE - 1;
5201
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005202 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005203 memset(&iter->seq, 0,
5204 sizeof(struct trace_iterator) -
5205 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005206 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005207 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005208
Lai Jiangshan4f535962009-05-18 19:35:34 +08005209 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005210 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005211 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005212 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005213 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005214
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005215 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005216 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005217 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005218 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005219 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005220 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005221 if (ret != TRACE_TYPE_NO_CONSUME)
5222 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005223
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005224 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005225 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005226
5227 /*
 5228		 * Setting the full flag means we reached the trace_seq buffer size
 5229		 * and should have left via the partial-output condition above;
 5230		 * one of the trace_seq_* functions is not being used properly.
5231 */
5232 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5233 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005234 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005235 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005236 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005237
Steven Rostedtb3806b42008-05-12 21:20:46 +02005238 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005239 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005240 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005241 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005242
5243 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005244 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005245 * entries, go back to wait for more entries.
5246 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005247 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005248 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005249
Steven Rostedt107bad82008-05-12 21:21:01 +02005250out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005251 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005252
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005253 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005254}
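
/*
 * Illustrative consumer sketch (not part of this file): a minimal
 * "cat trace_pipe" loop.  Reads consume events, and readers on the same
 * descriptor are serialized by iter->mutex above, so each event is
 * reported once.  The tracefs path is an assumption.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_trace_pipe(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return;
	/* Blocks in tracing_wait_pipe() until events arrive. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
}
#endif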
5255
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005256static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5257 unsigned int idx)
5258{
5259 __free_page(spd->pages[idx]);
5260}
5261
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005262static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005263 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005264 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005265 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005266 .steal = generic_pipe_buf_steal,
5267 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005268};
5269
Steven Rostedt34cd4992009-02-09 12:06:29 -05005270static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005271tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005272{
5273 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005274 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005275 int ret;
5276
5277 /* Seq buffer is page-sized, exactly what we need. */
5278 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005279 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005280 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005281
5282 if (trace_seq_has_overflowed(&iter->seq)) {
5283 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005284 break;
5285 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005286
5287 /*
 5288		 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE should
 5289		 * only be returned if the iter->seq overflowed. But check it
 5290		 * anyway to be safe.
5291 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005292 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005293 iter->seq.seq.len = save_len;
5294 break;
5295 }
5296
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005297 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005298 if (rem < count) {
5299 rem = 0;
5300 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005301 break;
5302 }
5303
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005304 if (ret != TRACE_TYPE_NO_CONSUME)
5305 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005306 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005307 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005308 rem = 0;
5309 iter->ent = NULL;
5310 break;
5311 }
5312 }
5313
5314 return rem;
5315}
5316
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005317static ssize_t tracing_splice_read_pipe(struct file *filp,
5318 loff_t *ppos,
5319 struct pipe_inode_info *pipe,
5320 size_t len,
5321 unsigned int flags)
5322{
Jens Axboe35f3d142010-05-20 10:43:18 +02005323 struct page *pages_def[PIPE_DEF_BUFFERS];
5324 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005325 struct trace_iterator *iter = filp->private_data;
5326 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005327 .pages = pages_def,
5328 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005329 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005330 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005331 .flags = flags,
5332 .ops = &tracing_pipe_buf_ops,
5333 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005334 };
5335 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005336 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005337 unsigned int i;
5338
Jens Axboe35f3d142010-05-20 10:43:18 +02005339 if (splice_grow_spd(pipe, &spd))
5340 return -ENOMEM;
5341
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005342 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005343
5344 if (iter->trace->splice_read) {
5345 ret = iter->trace->splice_read(iter, filp,
5346 ppos, pipe, len, flags);
5347 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005348 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005349 }
5350
5351 ret = tracing_wait_pipe(filp);
5352 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005353 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005354
Jason Wessel955b61e2010-08-05 09:22:23 -05005355 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005356 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005357 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005358 }
5359
Lai Jiangshan4f535962009-05-18 19:35:34 +08005360 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005361 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005362
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005363 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005364 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005365 spd.pages[i] = alloc_page(GFP_KERNEL);
5366 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005367 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005368
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005369 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005370
5371 /* Copy the data into the page, so we can start over. */
5372 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005373 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005374 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005375 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005376 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005377 break;
5378 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005379 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005380 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005381
Steven Rostedtf9520752009-03-02 14:04:40 -05005382 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005383 }
5384
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005385 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005386 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005387 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005388
5389 spd.nr_pages = i;
5390
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005391 if (i)
5392 ret = splice_to_pipe(pipe, &spd);
5393 else
5394 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005395out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005396 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005397 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005398
Steven Rostedt34cd4992009-02-09 12:06:29 -05005399out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005400 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005401 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005402}
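
/*
 * Illustrative sketch (not part of this file): moving trace data with
 * splice(2), which is served by tracing_splice_read_pipe() above and
 * saves one copy through userspace.  The caller is assumed to pass an
 * open trace_pipe descriptor and the write end of a pipe.
 */
#if 0 /* usage sketch only */
#define _GNU_SOURCE
#include <fcntl.h>

static ssize_t splice_trace(int trace_pipe_fd, int pipe_write_fd, size_t len)
{
	return splice(trace_pipe_fd, NULL, pipe_write_fd, NULL, len, 0);
}
#endif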
5403
Steven Rostedta98a3c32008-05-12 21:20:59 +02005404static ssize_t
5405tracing_entries_read(struct file *filp, char __user *ubuf,
5406 size_t cnt, loff_t *ppos)
5407{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005408 struct inode *inode = file_inode(filp);
5409 struct trace_array *tr = inode->i_private;
5410 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005411 char buf[64];
5412 int r = 0;
5413 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005414
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005415 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005416
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005417 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005418 int cpu, buf_size_same;
5419 unsigned long size;
5420
5421 size = 0;
5422 buf_size_same = 1;
5423 /* check if all cpu sizes are same */
5424 for_each_tracing_cpu(cpu) {
5425 /* fill in the size from first enabled cpu */
5426 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005427 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5428 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005429 buf_size_same = 0;
5430 break;
5431 }
5432 }
5433
5434 if (buf_size_same) {
5435 if (!ring_buffer_expanded)
5436 r = sprintf(buf, "%lu (expanded: %lu)\n",
5437 size >> 10,
5438 trace_buf_size >> 10);
5439 else
5440 r = sprintf(buf, "%lu\n", size >> 10);
5441 } else
5442 r = sprintf(buf, "X\n");
5443 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005444 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005445
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005446 mutex_unlock(&trace_types_lock);
5447
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005448 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5449 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005450}
5451
5452static ssize_t
5453tracing_entries_write(struct file *filp, const char __user *ubuf,
5454 size_t cnt, loff_t *ppos)
5455{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005456 struct inode *inode = file_inode(filp);
5457 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005458 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005459 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005460
Peter Huewe22fe9b52011-06-07 21:58:27 +02005461 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5462 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005463 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005464
5465 /* must have at least 1 entry */
5466 if (!val)
5467 return -EINVAL;
5468
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005469 /* value is in KB */
5470 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005471 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005472 if (ret < 0)
5473 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005474
Jiri Olsacf8517c2009-10-23 19:36:16 -04005475 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005476
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005477 return cnt;
5478}
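
/*
 * Note: buffer_size_kb takes kibibytes (val <<= 10 above) and the size
 * applies to each CPU's ring buffer -- the top-level file resizes all
 * CPUs, the per_cpu/cpuN/ variant just one.  An illustrative sketch
 * (not part of this file; path assumed):
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <unistd.h>

static void resize_buffers_to_4mb_per_cpu(void)
{
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd >= 0) {
		write(fd, "4096", 4);	/* 4096 KiB for every CPU */
		close(fd);
	}
}
#endif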
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005479
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005480static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005481tracing_total_entries_read(struct file *filp, char __user *ubuf,
5482 size_t cnt, loff_t *ppos)
5483{
5484 struct trace_array *tr = filp->private_data;
5485 char buf[64];
5486 int r, cpu;
5487 unsigned long size = 0, expanded_size = 0;
5488
5489 mutex_lock(&trace_types_lock);
5490 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005491 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005492 if (!ring_buffer_expanded)
5493 expanded_size += trace_buf_size >> 10;
5494 }
5495 if (ring_buffer_expanded)
5496 r = sprintf(buf, "%lu\n", size);
5497 else
5498 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5499 mutex_unlock(&trace_types_lock);
5500
5501 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5502}
5503
5504static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005505tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5506 size_t cnt, loff_t *ppos)
5507{
5508 /*
 5509	 * There is no need to read what the user has written; this function
 5510	 * exists just so that an "echo" into the file does not report an error
5511 */
5512
5513 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005514
5515 return cnt;
5516}
5517
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005518static int
5519tracing_free_buffer_release(struct inode *inode, struct file *filp)
5520{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005521 struct trace_array *tr = inode->i_private;
5522
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005523 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005524 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005525 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005526 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005527 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005528
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005529 trace_array_put(tr);
5530
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005531 return 0;
5532}
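
/*
 * Behavior note: writes to free_buffer are ignored; the work happens at
 * release time, where the ring buffer is resized to zero (after turning
 * tracing off first when the stop-on-free option is set).  Illustrative
 * sketch (not part of this file; path assumed):
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <unistd.h>

static void free_trace_buffers(void)
{
	int fd = open("/sys/kernel/tracing/free_buffer", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* the value is ignored */
		close(fd);		/* the close frees the buffer */
	}
}
#endif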
5533
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005534static ssize_t
5535tracing_mark_write(struct file *filp, const char __user *ubuf,
5536 size_t cnt, loff_t *fpos)
5537{
Steven Rostedtd696b582011-09-22 11:50:27 -04005538 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005539 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005540 struct ring_buffer_event *event;
5541 struct ring_buffer *buffer;
5542 struct print_entry *entry;
5543 unsigned long irq_flags;
5544 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005545 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005546 int nr_pages = 1;
5547 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005548 int offset;
5549 int size;
5550 int len;
5551 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005552 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005553
Steven Rostedtc76f0692008-11-07 22:36:02 -05005554 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005555 return -EINVAL;
5556
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005557 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005558 return -EINVAL;
5559
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005560 if (cnt > TRACE_BUF_SIZE)
5561 cnt = TRACE_BUF_SIZE;
5562
Steven Rostedtd696b582011-09-22 11:50:27 -04005563 /*
5564 * Userspace is injecting traces into the kernel trace buffer.
 5565	 * We want to be as non-intrusive as possible.
5566 * To do so, we do not want to allocate any special buffers
5567 * or take any locks, but instead write the userspace data
5568 * straight into the ring buffer.
5569 *
5570 * First we need to pin the userspace buffer into memory,
 5571	 * which it most likely already is, because userspace just referenced it.
5572 * But there's no guarantee that it is. By using get_user_pages_fast()
5573 * and kmap_atomic/kunmap_atomic() we can get access to the
5574 * pages directly. We then write the data directly into the
5575 * ring buffer.
5576 */
5577 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005578
Steven Rostedtd696b582011-09-22 11:50:27 -04005579 /* check if we cross pages */
5580 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5581 nr_pages = 2;
5582
5583 offset = addr & (PAGE_SIZE - 1);
5584 addr &= PAGE_MASK;
5585
5586 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5587 if (ret < nr_pages) {
5588 while (--ret >= 0)
5589 put_page(pages[ret]);
5590 written = -EFAULT;
5591 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005592 }
5593
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005594 for (i = 0; i < nr_pages; i++)
5595 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005596
5597 local_save_flags(irq_flags);
5598 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005599 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005600 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5601 irq_flags, preempt_count());
5602 if (!event) {
5603 /* Ring buffer disabled, return as if not open for write */
5604 written = -EBADF;
5605 goto out_unlock;
5606 }
5607
5608 entry = ring_buffer_event_data(event);
5609 entry->ip = _THIS_IP_;
5610
5611 if (nr_pages == 2) {
5612 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005613 memcpy(&entry->buf, map_page[0] + offset, len);
5614 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005615 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005616 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005617
5618 if (entry->buf[cnt - 1] != '\n') {
5619 entry->buf[cnt] = '\n';
5620 entry->buf[cnt + 1] = '\0';
5621 } else
5622 entry->buf[cnt] = '\0';
5623
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005624 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005625
5626 written = cnt;
5627
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005628 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005629
Steven Rostedtd696b582011-09-22 11:50:27 -04005630 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005631 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005632 kunmap_atomic(map_page[i]);
5633 put_page(pages[i]);
5634 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005635 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005636 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005637}
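
/*
 * Illustrative sketch (not part of this file): annotating the trace from
 * userspace through trace_marker, handled by tracing_mark_write() above.
 * Each write becomes one TRACE_PRINT event; writes longer than
 * TRACE_BUF_SIZE are truncated.  The path is an assumption.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void trace_marker_write(const char *msg)
{
	static int fd = -1;

	if (fd < 0)
		fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
	if (fd >= 0)
		write(fd, msg, strlen(msg));
}
#endif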
5638
Li Zefan13f16d22009-12-08 11:16:11 +08005639static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005640{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005641 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005642 int i;
5643
5644 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005645 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005646 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005647 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5648 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005649 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005650
Li Zefan13f16d22009-12-08 11:16:11 +08005651 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005652}
5653
Steven Rostedte1e232c2014-02-10 23:38:46 -05005654static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005655{
Zhaolei5079f322009-08-25 16:12:56 +08005656 int i;
5657
Zhaolei5079f322009-08-25 16:12:56 +08005658 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5659 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5660 break;
5661 }
5662 if (i == ARRAY_SIZE(trace_clocks))
5663 return -EINVAL;
5664
Zhaolei5079f322009-08-25 16:12:56 +08005665 mutex_lock(&trace_types_lock);
5666
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005667 tr->clock_id = i;
5668
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005669 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005670
David Sharp60303ed2012-10-11 16:27:52 -07005671 /*
5672 * New clock may not be consistent with the previous clock.
5673 * Reset the buffer so that it doesn't have incomparable timestamps.
5674 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005675 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005676
5677#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liucf0523362017-09-05 16:57:19 -05005678 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005679 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005680 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005681#endif
David Sharp60303ed2012-10-11 16:27:52 -07005682
Zhaolei5079f322009-08-25 16:12:56 +08005683 mutex_unlock(&trace_types_lock);
5684
Steven Rostedte1e232c2014-02-10 23:38:46 -05005685 return 0;
5686}
5687
5688static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5689 size_t cnt, loff_t *fpos)
5690{
5691 struct seq_file *m = filp->private_data;
5692 struct trace_array *tr = m->private;
5693 char buf[64];
5694 const char *clockstr;
5695 int ret;
5696
5697 if (cnt >= sizeof(buf))
5698 return -EINVAL;
5699
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005700 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05005701 return -EFAULT;
5702
5703 buf[cnt] = 0;
5704
5705 clockstr = strstrip(buf);
5706
5707 ret = tracing_set_clock(tr, clockstr);
5708 if (ret)
5709 return ret;
5710
Zhaolei5079f322009-08-25 16:12:56 +08005711 *fpos += cnt;
5712
5713 return cnt;
5714}
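
/*
 * Illustrative sketch (not part of this file): selecting a trace clock.
 * Reading trace_clock lists the options with the active one in brackets;
 * writing a name goes through tracing_set_clock() above and resets the
 * buffers, since old and new timestamps may not be comparable.  The
 * clock name and path are assumptions.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <unistd.h>

static void use_global_trace_clock(void)
{
	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);

	if (fd >= 0) {
		write(fd, "global", 6);
		close(fd);
	}
}
#endif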
5715
Li Zefan13f16d22009-12-08 11:16:11 +08005716static int tracing_clock_open(struct inode *inode, struct file *file)
5717{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005718 struct trace_array *tr = inode->i_private;
5719 int ret;
5720
Li Zefan13f16d22009-12-08 11:16:11 +08005721 if (tracing_disabled)
5722 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005723
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005724 if (trace_array_get(tr))
5725 return -ENODEV;
5726
5727 ret = single_open(file, tracing_clock_show, inode->i_private);
5728 if (ret < 0)
5729 trace_array_put(tr);
5730
5731 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005732}
5733
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005734struct ftrace_buffer_info {
5735 struct trace_iterator iter;
5736 void *spare;
5737 unsigned int read;
5738};
5739
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005740#ifdef CONFIG_TRACER_SNAPSHOT
5741static int tracing_snapshot_open(struct inode *inode, struct file *file)
5742{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005743 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005744 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005745 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005746 int ret = 0;
5747
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005748 if (trace_array_get(tr) < 0)
5749 return -ENODEV;
5750
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005751 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005752 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005753 if (IS_ERR(iter))
5754 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005755 } else {
5756 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005757 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005758 m = kzalloc(sizeof(*m), GFP_KERNEL);
5759 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005760 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005761 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5762 if (!iter) {
5763 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005764 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005765 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005766 ret = 0;
5767
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005768 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005769 iter->trace_buffer = &tr->max_buffer;
5770 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005771 m->private = iter;
5772 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005773 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005774out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005775 if (ret < 0)
5776 trace_array_put(tr);
5777
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005778 return ret;
5779}
5780
5781static ssize_t
5782tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5783 loff_t *ppos)
5784{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005785 struct seq_file *m = filp->private_data;
5786 struct trace_iterator *iter = m->private;
5787 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005788 unsigned long val;
5789 int ret;
5790
5791 ret = tracing_update_buffers();
5792 if (ret < 0)
5793 return ret;
5794
5795 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5796 if (ret)
5797 return ret;
5798
5799 mutex_lock(&trace_types_lock);
5800
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005801 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005802 ret = -EBUSY;
5803 goto out;
5804 }
5805
5806 switch (val) {
5807 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005808 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5809 ret = -EINVAL;
5810 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005811 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005812 if (tr->allocated_snapshot)
5813 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005814 break;
5815 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005816/* Only allow per-cpu swap if the ring buffer supports it */
5817#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5818 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5819 ret = -EINVAL;
5820 break;
5821 }
5822#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005823 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005824 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005825 if (ret < 0)
5826 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005827 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005828 local_irq_disable();
5829 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005830 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005831 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005832 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005833 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005834 local_irq_enable();
5835 break;
5836 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005837 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005838 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5839 tracing_reset_online_cpus(&tr->max_buffer);
5840 else
5841 tracing_reset(&tr->max_buffer, iter->cpu_file);
5842 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005843 break;
5844 }
5845
5846 if (ret >= 0) {
5847 *ppos += cnt;
5848 ret = cnt;
5849 }
5850out:
5851 mutex_unlock(&trace_types_lock);
5852 return ret;
5853}
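
/*
 * Summary of the values handled above: 0 frees the snapshot buffer,
 * 1 allocates it if needed and swaps it with the live buffer, and any
 * other number just clears the snapshot.  An illustrative sketch (not
 * part of this file; path assumed):
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <unistd.h>

static void take_snapshot(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* allocate if needed, then swap */
		close(fd);
	}
}
#endif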
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005854
5855static int tracing_snapshot_release(struct inode *inode, struct file *file)
5856{
5857 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005858 int ret;
5859
5860 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005861
5862 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005863 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005864
5865 /* If write only, the seq_file is just a stub */
5866 if (m)
5867 kfree(m->private);
5868 kfree(m);
5869
5870 return 0;
5871}
5872
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005873static int tracing_buffers_open(struct inode *inode, struct file *filp);
5874static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5875 size_t count, loff_t *ppos);
5876static int tracing_buffers_release(struct inode *inode, struct file *file);
5877static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5878 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5879
5880static int snapshot_raw_open(struct inode *inode, struct file *filp)
5881{
5882 struct ftrace_buffer_info *info;
5883 int ret;
5884
5885 ret = tracing_buffers_open(inode, filp);
5886 if (ret < 0)
5887 return ret;
5888
5889 info = filp->private_data;
5890
5891 if (info->iter.trace->use_max_tr) {
5892 tracing_buffers_release(inode, filp);
5893 return -EBUSY;
5894 }
5895
5896 info->iter.snapshot = true;
5897 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5898
5899 return ret;
5900}
5901
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005902#endif /* CONFIG_TRACER_SNAPSHOT */
5903
5904
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005905static const struct file_operations tracing_thresh_fops = {
5906 .open = tracing_open_generic,
5907 .read = tracing_thresh_read,
5908 .write = tracing_thresh_write,
5909 .llseek = generic_file_llseek,
5910};
5911
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005912#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005913static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005914 .open = tracing_open_generic,
5915 .read = tracing_max_lat_read,
5916 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005917 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005918};
Chen Gange428abb2015-11-10 05:15:15 +08005919#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005920
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005921static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005922 .open = tracing_open_generic,
5923 .read = tracing_set_trace_read,
5924 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005925 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005926};
5927
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005928static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005929 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005930 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005931 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005932 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005933 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005934 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005935};
5936
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005937static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005938 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005939 .read = tracing_entries_read,
5940 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005941 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005942 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005943};
5944
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005945static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005946 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005947 .read = tracing_total_entries_read,
5948 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005949 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005950};
5951
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005952static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005953 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005954 .write = tracing_free_buffer_write,
5955 .release = tracing_free_buffer_release,
5956};
5957
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005958static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005959 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005960 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005961 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005962 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005963};
5964
Zhaolei5079f322009-08-25 16:12:56 +08005965static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005966 .open = tracing_clock_open,
5967 .read = seq_read,
5968 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005969 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005970 .write = tracing_clock_write,
5971};
5972
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005973#ifdef CONFIG_TRACER_SNAPSHOT
5974static const struct file_operations snapshot_fops = {
5975 .open = tracing_snapshot_open,
5976 .read = seq_read,
5977 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005978 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005979 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005980};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005981
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005982static const struct file_operations snapshot_raw_fops = {
5983 .open = snapshot_raw_open,
5984 .read = tracing_buffers_read,
5985 .release = tracing_buffers_release,
5986 .splice_read = tracing_buffers_splice_read,
5987 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005988};
5989
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005990#endif /* CONFIG_TRACER_SNAPSHOT */
5991
Steven Rostedt2cadf912008-12-01 22:20:19 -05005992static int tracing_buffers_open(struct inode *inode, struct file *filp)
5993{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005994 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005995 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005996 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005997
5998 if (tracing_disabled)
5999 return -ENODEV;
6000
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006001 if (trace_array_get(tr) < 0)
6002 return -ENODEV;
6003
Steven Rostedt2cadf912008-12-01 22:20:19 -05006004 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006005 if (!info) {
6006 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006007 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006008 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006009
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006010 mutex_lock(&trace_types_lock);
6011
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006012 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006013 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006014 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006015 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006016 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006017 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006018 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006019
6020 filp->private_data = info;
6021
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006022 tr->current_trace->ref++;
6023
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006024 mutex_unlock(&trace_types_lock);
6025
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006026 ret = nonseekable_open(inode, filp);
6027 if (ret < 0)
6028 trace_array_put(tr);
6029
6030 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006031}
6032
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006033static unsigned int
6034tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6035{
6036 struct ftrace_buffer_info *info = filp->private_data;
6037 struct trace_iterator *iter = &info->iter;
6038
6039 return trace_poll(iter, filp, poll_table);
6040}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
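
/*
 * A minimal user-space sketch of how this read path is typically driven
 * (illustrative only, not part of this file; assumes tracefs is mounted
 * at /sys/kernel/tracing and a 4K page size):
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	char page[4096];
 *	ssize_t n;
 *
 *	// A blocking read waits in wait_on_pipe() until the ring buffer
 *	// has data, then returns the contents of one sub-buffer page.
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		handle_raw_page(page, n);	// hypothetical consumer
 */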

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

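/*
 * A buffer_ref ties one ring-buffer page to the pipe buffers built by
 * tracing_buffers_splice_read() below. Each page is handed to the pipe
 * with a reference count of 1; buffer_pipe_buf_get() and
 * buffer_pipe_buf_release() adjust that count, and the final release
 * returns the page to the ring buffer via ring_buffer_free_read_page().
 */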
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	/* Refuse new references once the count gets close to overflowing */
	if (ref->ref > INT_MAX/2)
		return false;

	ref->ref++;
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(); releases any pages still held in the
 * spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
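
/*
 * User-space sketch of the zero-copy path implemented above
 * (illustrative only; error handling elided, tracefs assumed at
 * /sys/kernel/tracing, 4K pages):
 *
 *	int raw = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("trace.dat", O_WRONLY | O_CREAT, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	// Ring-buffer pages are linked into the pipe without copying,
 *	// then moved on to the output file. Offsets and lengths must be
 *	// page aligned, as enforced at the top of the function.
 *	splice(raw, NULL, pfd[1], NULL, 4096, 0);
 *	splice(pfd[0], NULL, out, NULL, 4096, 0);
 */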

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
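
/*
 * Reading per_cpu/cpuN/stats yields one "name: value" line per counter
 * above. With a nanosecond trace clock the output looks roughly like
 * (values illustrative):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 46080
 *	oldest event ts:  5234.000123
 *	now ts:  5240.000456
 *	dropped events: 0
 *	read events: 512
 */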

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
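
/*
 * The "snapshot" command registered above is driven through
 * set_ftrace_filter. A typical session (function name illustrative,
 * tracefs assumed at /sys/kernel/tracing):
 *
 *	# take a snapshot on the first 5 calls to do_fault:
 *	echo 'do_fault:snapshot:5' > set_ftrace_filter
 *	# or on every call:
 *	echo 'do_fault:snapshot' > set_ftrace_filter
 *	# remove the probe again:
 *	echo '!do_fault:snapshot' > set_ftrace_filter
 */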

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}
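
/*
 * The resulting per-CPU layout under the tracefs mount point is:
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot		(CONFIG_TRACER_SNAPSHOT only)
 *	per_cpu/cpu0/snapshot_raw	(CONFIG_TRACER_SNAPSHOT only)
 */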

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * To extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm:
 *
 *   idx = *ptr;
 *
 * This works because the pointer is the address of the index, and
 * index[i] == i. Subtracting that index from the pointer then lands
 * on the start of the index array itself:
 *
 *   ptr - idx == &index[0]
 *
 * A simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
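
/*
 * Worked example of the scheme above: if a file was created with
 * data == &tr->trace_flags_index[3], then *pindex becomes 3 (because
 * trace_flags_index[3] == 3), data - 3 == &tr->trace_flags_index[0],
 * and container_of() on that address recovers tr itself.
 */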

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
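
/*
 * Each core flag becomes a boolean file under options/ in the trace
 * directory; toggling is a plain 0/1 write (option name illustrative):
 *
 *	echo 1 > options/sym-offset
 *	echo 0 > options/sym-offset
 */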

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
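
/*
 * rb_simple_fops backs the "tracing_on" file created in
 * init_tracer_tracefs() below. Usage is a 0/1 write, e.g. (tracefs
 * assumed at /sys/kernel/tracing):
 *
 *	echo 0 > tracing_on	# stop recording into the ring buffer
 *	echo 1 > tracing_on	# resume recording
 */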

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
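
/*
 * Note that unless a snapshot was requested on the kernel command line,
 * max_buffer is created with a token size of 1 above and is only grown
 * later, when a snapshot is actually allocated.
 */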

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
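
/*
 * instance_mkdir()/instance_rmdir() are called by tracefs when a
 * directory is created or removed under instances/. A typical session
 * (instance name illustrative, tracefs assumed at /sys/kernel/tracing):
 *
 *	mkdir /sys/kernel/tracing/instances/foo    # new trace_array "foo"
 *	echo 1 > instances/foo/tracing_on
 *	rmdir /sys/kernel/tracing/instances/foo    # -EBUSY while its files
 *						   # are held open
 */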

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
7323
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

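/*
 * Modules can carry their own enum maps. Hook the module notifier so
 * the maps are added when a module loads and, when the enum map file
 * is enabled, removed again when it unloads.
 */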
#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

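/*
 * trace_enum_maps is a singly linked list of per-module blocks, each
 * delimited by a head item and a tail item. Removal hops from head to
 * tail until it finds the block belonging to @mod, unlinks it, and
 * frees the whole block in one kfree().
 */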
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

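/* Dispatch on the module state: COMING adds the maps, GOING removes them. */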
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

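/*
 * Create the top-level tracefs control files (tracing_thresh, README,
 * saved_cmdlines, ...) once the filesystem is available. Runs at
 * fs_initcall() time via the initcall registered at the bottom of this
 * file.
 */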
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

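/*
 * When ftrace_dump_on_oops is set (via the "ftrace_dump_on_oops" boot
 * parameter or the kernel.ftrace_dump_on_oops sysctl), dump the ring
 * buffer to the console on panic. For example, assuming procfs is
 * mounted in the usual place:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */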
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

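/* Also dump on an oops that does not escalate to a full panic. */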
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

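/*
 * Write one trace_seq worth of output to the console, clamping the
 * length defensively before printing, then reinitialize the seq for
 * the next iteration of the dump loop.
 */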
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

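/*
 * Set up an iterator over the global trace buffer without any of the
 * file-open plumbing; used by ftrace_dump() and the kdb trace dump
 * code.
 */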
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

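/*
 * Dump the ring buffer contents to the console, e.g. from a panic or
 * sysrq-z. @oops_dump_mode selects the scope: DUMP_ALL dumps every
 * CPU's buffer, DUMP_ORIG only the CPU the dump was triggered on, and
 * DUMP_NONE bails out after turning tracing off.
 */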
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

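/*
 * Allocate and wire up the global trace state at boot: the cpumasks,
 * the temp buffer used by event triggers, the saved-cmdlines map, and
 * the per-CPU ring buffers; then register the nop tracer and the
 * panic/die notifiers. Error paths unwind in reverse allocation order.
 */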
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

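/* Called early in boot from start_kernel(), long before initcalls run. */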
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init
	 * section that is about to be freed. This function runs as a
	 * late initcall: if the boot tracer was never registered,
	 * clear the pointer so that a later registration cannot
	 * dereference the freed memory.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

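/*
 * tracer_init_tracefs() needs the filesystems initialized, hence
 * fs_initcall; clear_boot_tracer() must run at the very end of boot,
 * just before the init sections it may point into are freed.
 */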
fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);