/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t			tracer_counter;
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 16384UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

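/*
 * Trace entries are fixed-size records packed back to back into
 * page-sized blocks; each per-cpu buffer is a list of such pages
 * (data->trace_pages), so a buffer grows one page at a time.
 */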
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF	= 0x01,
	TRACE_FLAG_NEED_RESCHED	= 0x02,
	TRACE_FLAG_HARDIRQ	= 0x04,
	TRACE_FLAG_SOFTIRQ	= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

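/*
 * Snapshot the current trace by swapping buffers: each live
 * trace_array_cpu is copied into max_tr while keeping max_tr's own
 * page lists, so max_tr ends up owning the pages that hold the
 * maximum-latency trace and the live cpu continues into max_tr's
 * old pages, which are reset for reuse.
 */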
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		save_trace = max_tr.data[i]->trace;
		save_pages = max_tr.data[i]->trace_pages;
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
		data->trace_pages = save_pages;
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	save_trace = max_tr.data[cpu]->trace;
	save_pages = max_tr.data[cpu]->trace_pages;
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;
	data->trace_pages = save_pages;
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_current = data->trace;
	data->trace_current_idx = 0;
}

#ifdef CONFIG_FTRACE
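/*
 * Called on every traced function entry. The per-cpu "disabled"
 * counter guards against recursion: the entry is only recorded when
 * this is the sole active caller on this cpu
 * (atomic_inc_return() == 1), with interrupts disabled around the
 * record.
 */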
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

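/*
 * The cmdline cache maps pids to the most recently seen task comm,
 * so trace output can print task names without a task_struct in
 * hand. Only the last SAVED_CMDLINES entries are kept; when the
 * ring wraps, the evicted slot's pid mapping is invalidated.
 */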
static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

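/*
 * Reserve the next entry slot in the per-cpu buffer. When the
 * current page fills up, advance to the next page in the
 * trace_pages list, wrapping back to the head so the oldest
 * entries are overwritten (ring-buffer behaviour).
 */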
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
			struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;
	struct page *page;
	struct list_head *next;

	data->trace_idx++;
	idx = data->trace_current_idx;
	idx_next = idx + 1;

	entry = data->trace_current + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		page = virt_to_page(data->trace_current);
		if (unlikely(&page->lru == data->trace_pages.prev))
			next = data->trace_pages.next;
		else
			next = page->lru.next;
		page = list_entry(next, struct page, lru);
		data->trace_current = page_address(page);
		idx_next = 0;
	}

	data->trace_current_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
			     unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx)
		return NULL;

	if (!iter->next_page[cpu]) {
		/*
		 * Initialize. If the count of elements in
		 * this buffer is greater than the max entries
		 * we had an underrun. Which means we looped around.
		 * We can simply use the current pointer as our
		 * starting point.
		 */
		if (data->trace_idx >= tr->entries) {
			page = virt_to_page(data->trace_current);
			iter->next_page[cpu] = &page->lru;
			iter->next_page_idx[cpu] = data->trace_current_idx;
		} else {
			iter->next_page[cpu] = data->trace_pages.next;
			iter->next_page_idx[cpu] = 0;
		}
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	array = page_address(page);

	return &array[iter->next_page_idx[cpu]];
}

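/*
 * Merge the per-cpu streams: pick, across all cpus, the pending
 * entry with the lowest global sequence number (entry->idx, taken
 * from tracer_counter), so output is presented in record order.
 */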
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!tr->data[cpu]->trace)
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->idx++;
		iter->next_idx[next_cpu]++;
		iter->next_page_idx[next_cpu]++;
		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
			struct trace_array_cpu *data = iter->tr->data[next_cpu];

			iter->next_page_idx[next_cpu] = 0;
			iter->next_page[next_cpu] =
				iter->next_page[next_cpu]->next;
			if (iter->next_page[next_cpu] == &data->trace_pages)
				iter->next_page[next_cpu] =
					data->trace_pages.next;
		}
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}

static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

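/*
 * seq_file start method. The iterator only walks forward, so a
 * *pos that does not match the iterator's saved position forces a
 * rescan from the beginning; otherwise the next entry is fetched
 * directly.
 */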
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}

static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}


static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (tr->data[cpu]->trace) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

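/* delays (in usecs) above this get a '!' mark, above 1 a '+' mark */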
unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	default:
		seq_printf(m, "Unknown type %d\n", entry->type);
	}
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
		    entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
	seq_printf(m, "\n");
}

static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (data->trace &&
		    data->trace_idx)
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

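/*
 * Open a trace file: latency-style tracers (print_max) are read
 * from the snapshot buffer (max_tr), everything else from the live
 * buffer. Tracing is paused while the iterator is open so the dump
 * stays stable.
 */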
static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}


static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

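/*
 * Set an output option by name, e.g. "echo sym-offset" or
 * "echo nosym-offset" into /debugfs/tracing/iter_ctrl; a "no"
 * prefix clears the flag, otherwise it is set.
 */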
static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

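/*
 * Turn tracing on or off: "echo 1" or "echo 0" into
 * /debugfs/tracing/tracing_enabled. A change is forwarded to the
 * current tracer's ctrl_update() hook as well.
 */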
static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

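/*
 * Select the current tracer by writing its registered name to
 * /debugfs/tracing/current_tracer (e.g. "echo none" to disable
 * tracing); the old tracer is reset and the new one initialized.
 */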
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	*ptr = val * 1000;

	return cnt;
}

static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

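/*
 * Grow every buffer by one page. Pages for all cpus (and for the
 * max_tr snapshot when configured) are allocated up front onto a
 * temporary list, so on any failure they can all be freed and the
 * existing buffers left untouched.
 */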
static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}
		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}

__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		data->trace = array;

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		max_tr.data[i]->trace = array;

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
	max_tr.entries = global_trace.entries;

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}
#endif
	}
	return -ENOMEM;
}

device_initcall(tracer_alloc_buffers);