/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static int tracing_disabled = 1;

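/* Convert nanoseconds to microseconds, rounding to the nearest microsecond. */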
static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t			tracer_counter;
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 16384UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

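/* Allow the buffer size to be set on the kernel command line: trace_entries=<count> */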
static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static notrace void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

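/* Sanity-check that a CPU buffer's page list is a consistent doubly linked list. */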
void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);
	}
}

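/* Return the kernel address of the first page of @data's buffer, or NULL if the buffer owns no pages. */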
void *head_page(struct trace_array_cpu *data)
{
	struct page *page;

	check_pages(data);
	if (list_empty(&data->trace_pages))
		return NULL;

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
}

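/*
 * printf() into the iterator's sequence buffer. If the formatted
 * string does not fit, nothing is written and 0 is returned.
 */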
static notrace int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret > len)
		return 0;

	s->len += ret;

	return len;
}

static notrace int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static notrace int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static notrace void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
}

static notrace void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

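/*
 * Swap the page lists and ring bookkeeping of two CPU buffers;
 * used to exchange a live buffer with the max-latency snapshot.
 */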
notrace static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
		sizeof(struct trace_array_cpu) -
		offsetof(struct trace_array_cpu, trace_head_idx));

	check_pages(tr1);
	check_pages(tr2);
	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
	check_pages(tr1);
	check_pages(tr2);
}

notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		flip_trace(max_tr.data[i], data);
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

notrace void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;
}

#ifdef CONFIG_FTRACE
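/*
 * Called by the ftrace framework on every traced function entry.
 * The per-CPU disabled counter guards against recursive tracing.
 */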
static notrace void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

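/* Small pid -> comm cache, so reports can print the task names recorded at trace time. */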
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

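/* Remember the comm of @tsk, recycling the oldest cache slot when all are in use. */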
static notrace void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline notrace struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
	/*
	 * Round-robin - but skip the head (which is not a real page):
	 */
	next = next->next;
	if (unlikely(next == &data->trace_pages))
		next = next->next;
	BUG_ON(next == &data->trace_pages);

	return next;
}

static inline notrace void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
	struct list_head *next;
	struct page *page;

	page = virt_to_page(addr);

	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
}

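/*
 * Reserve the next entry slot at the head of the ring buffer.
 * When the head catches up with the tail, the oldest entry is
 * discarded (overwrite mode).
 */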
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	data->trace_idx++;
	idx = data->trace_head_idx;
	idx_next = idx + 1;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);
		idx_next = 0;
	}

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
				trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;
		}
	}

	data->trace_head_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;

	spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
	spin_unlock(&data->lock);
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	spin_lock(&data->lock);
	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
	spin_unlock(&data->lock);
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

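/*
 * Return the entry at the iterator's current read index for @cpu,
 * or NULL once the reader has caught up with the writer.
 */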
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx ||
	    (data->trace_head == data->trace_tail &&
	     data->trace_head_idx == data->trace_tail_idx))
		return NULL;

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	/* Still possible to catch up to the tail */
	if (iter->next_idx[cpu] && array == data->trace_tail &&
	    iter->next_page_idx[cpu] == data->trace_tail_idx)
		return NULL;

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
	return &array[iter->next_page_idx[cpu]];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

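/* Advance the iterator by one entry, stepping to the next buffer page when the current page is exhausted. */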
static notrace void
trace_iterator_increment(struct trace_iterator *iter)
{
	iter->idx++;
	iter->next_idx[iter->cpu]++;
	iter->next_page_idx[iter->cpu]++;
	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
		struct trace_array_cpu *data = iter->tr->data[iter->cpu];

		iter->next_page_idx[iter->cpu] = 0;
		iter->next_page[iter->cpu] =
			trace_next_list(data, iter->next_page[iter->cpu]);
	}
}

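/* Consume the entry at the tail, handing its slot back to the writer; used by the trace_pipe reader. */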
static notrace void
trace_consume(struct trace_iterator *iter)
{
	struct trace_array_cpu *data = iter->tr->data[iter->cpu];

	data->trace_tail_idx++;
	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
		data->trace_tail = trace_next_page(data, data->trace_tail);
		data->trace_tail_idx = 0;
	}

	/* Check if we empty it, then reset the index */
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
		data->trace_idx = 0;

	trace_iterator_increment(iter);
}

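/* Pick the oldest pending entry across all CPUs and advance the iterator past it. */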
static notrace void *
find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	iter->prev_ent = iter->ent;
	iter->prev_cpu = iter->cpu;

	iter->ent = next;
	iter->cpu = next_cpu;

	if (next)
		trace_iterator_increment(iter);

	return next ? iter : NULL;
}

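/* seq_file ->next(): step the iterator forward until it reaches position *pos. */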
static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;
		iter->prev_ent = NULL;
		iter->prev_cpu = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static notrace int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static notrace void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static notrace void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}

static notrace void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static notrace void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		trace_seq_putc(s, 'H');
	else {
		if (hardirq)
			trace_seq_putc(s, 'h');
		else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static notrace void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

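/* Emit one entry in the latency-trace format: generic header fields, timestamps, then the per-type payload. */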
static notrace void
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(entry->t),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
				 entry->ctx.prev_pid,
				 entry->ctx.prev_prio,
				 S,
				 entry->ctx.next_pid,
				 entry->ctx.next_prio,
				 comm);
		break;
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
}

static notrace void sync_time_offset(struct trace_iterator *iter)
{
	struct trace_array_cpu *prev_array, *array;
	struct trace_entry *prev_entry, *entry;
	cycle_t prev_t, t;

	entry = iter->ent;
	prev_entry = iter->prev_ent;
	if (!prev_entry)
		return;

	prev_array = iter->tr->data[iter->prev_cpu];
	array = iter->tr->data[iter->cpu];

	prev_t = prev_entry->t + prev_array->time_offset;
	t = entry->t + array->time_offset;

	/*
	 * If time goes backwards we increase the offset of
	 * the current array, to not have observable time warps.
	 * This will quickly synchronize the time offsets of
	 * multiple CPUs:
	 */
	if (t < prev_t)
		array->time_offset += prev_t - t;
}

static notrace int
print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;
	int ret;

	sync_time_offset(iter);
	entry = iter->ent;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return 0;

	switch (entry->type) {
	case TRACE_FN:
		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		if (!ret)
			return 0;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return 0;
			ret = seq_print_ip_sym(s, entry->fn.parent_ip,
					       sym_flags);
			if (!ret)
				return 0;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return 0;
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       S,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);
		if (!ret)
			return 0;
		break;
	}
	return 1;
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx &&
		    (data->trace_tail != data->trace_head ||
		     data->trace_tail_idx != data->trace_head_idx))
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static notrace void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};

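/* Show every iterator option, prefixed with "no" when the option is clear. */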
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_iter_ctrl_read,
	.write		= tracing_iter_ctrl_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

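/* trace_pipe allows only a single reader at a time; tracing_reader enforces that. */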
static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_iterator *iter;

	if (tracing_disabled)
		return -ENODEV;

	/* We only allow for one reader of the pipe */
	if (atomic_inc_return(&tracing_reader) != 1) {
		atomic_dec(&tracing_reader);
		return -EBUSY;
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->tr = &global_trace;

	filp->private_data = iter;

	return 0;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	kfree(iter);
	atomic_dec(&tracing_reader);

	return 0;
}

1745/*
1746 * Consumer reader.
1747 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array_cpu *data;
	static cpumask_t mask;
	struct trace_entry *entry;
	static int start;
	unsigned long flags;
	int read = 0;
	int cpu;
	int len;
	int ret;

	/* return any leftover data */
	if (iter->seq.len > start) {
		len = iter->seq.len - start;
		if (cnt > len)
			cnt = len;
		ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
		if (ret)
			return -EFAULT;

		/* only advance past what was actually copied */
		start += cnt;

		return cnt;
	}

	trace_seq_reset(&iter->seq);
	start = 0;

	while (trace_empty(iter)) {
		/*
		 * This is a makeshift waitqueue. The reasons we don't use
		 * an actual wait queue are:
		 * 1) we only ever have one waiter
		 * 2) the tracer traces all functions, and we don't want
		 *    the overhead of calling wake_up and friends
		 *    (and tracing them too)
		 * Anyway, this is a really primitive wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		iter->tr->waiter = current;

		/* sleep for one second, and try again. */
		schedule_timeout(HZ);

		iter->tr->waiter = NULL;

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	/* stop when tracing is finished */
	if (trace_empty(iter))
		return 0;

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	memset(iter, 0, sizeof(*iter));
	iter->tr = &global_trace;
	iter->pos = -1;

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	cpus_clear(mask);
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (!head_page(data) || !data->trace_idx)
			continue;

		atomic_inc(&data->disabled);
		spin_lock(&data->lock);
		cpu_set(cpu, mask);
	}

	while ((entry = find_next_entry(iter, &cpu))) {
		iter->ent = entry;
		iter->cpu = cpu;

		ret = print_trace_fmt(iter);
		if (!ret)
			break;

		trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		spin_unlock(&data->lock);
		atomic_dec(&data->disabled);
	}
	local_irq_restore(flags);

	/* Now copy what we have to the user */
	read = iter->seq.len;
	if (read > cnt)
		read = cnt;

	ret = copy_to_user(ubuf, iter->seq.buffer, read);

	/* remember what was not consumed for the next read */
	if (read < iter->seq.len)
		start = read;
	else
		trace_seq_reset(&iter->seq);

	if (ret)
		read = -EFAULT;

	return read;
}

static struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
};

static struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.read		= tracing_read_pipe,
	.release	= tracing_release_pipe,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
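/*
 * All of the control files below live in that "tracing" directory.
 * Reaching them requires debugfs to be mounted; the mount point is
 * up to the administrator, e.g. (a sketch):
 *
 *	# mount -t debugfs nodev /debug
 *	# ls /debug/tracing
 */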

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
				    NULL, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}
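/*
 * A quick tour of the more interesting files created above (a
 * sketch; paths assume debugfs is mounted at /debug):
 *
 *	# echo none > /debug/tracing/current_tracer
 *	# cat /debug/tracing/trace		(one-shot view)
 *	# cat /debug/tracing/trace_pipe		(blocking, consuming read)
 *	# echo 0 > /debug/tracing/tracing_max_latency
 */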

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name		= "none",
};

static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	void *array;
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_pages;
		}
		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		spin_lock_init(&data->lock);
		lockdep_set_class(&data->lock, &data->lock_key);
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		spin_lock_init(&data->lock);
		lockdep_set_class(&data->lock, &data->lock_key);
		page = list_entry(pages.next, struct page, lru);
		list_del_init(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}
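/*
 * Each successful trace_alloc_page() call grows every per-CPU buffer
 * (and, with CONFIG_TRACER_MAX_TRACE, the max_tr shadow buffer) by
 * one page, and grows global_trace.entries by ENTRIES_PER_PAGE.
 * The page LRU flag is (ab)used purely to tell the two buffers
 * apart, as the comment in tracer_alloc_buffers() below notes.
 */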

__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_buffers;
		}

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page"
			       " for trace buffer!\n");
			goto free_buffers;
		}

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info(" actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del_init(&page->lru);
				__free_page(page);
			}
		}
#endif
	}
	return ret;
}
fs_initcall(tracer_alloc_buffers);