/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/svghelper.h"

static char const *input_name = "perf.data";
static char const *output_name = "output.svg";


static unsigned long page_size;
static unsigned long mmap_window = 32;
static u64 sample_type;

static unsigned int numcpus;
static u64 min_freq;	/* Lowest CPU frequency seen */
static u64 max_freq;	/* Highest CPU frequency seen */
static u64 turbo_frequency;

static u64 first_time, last_time;

static int power_only;


static struct perf_header *header;

struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Datastructure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s; this is because we want to track
 * different programs separately, while exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */

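/*
 * In memory this becomes (all lists are singly linked, newest entry first;
 * 'current' points at the comm currently in use for the pid):
 *
 *	all_data -> per_pid -> per_pid -> ...
 *	               |
 *	               +-> all/current -> per_pidcomm -> per_pidcomm -> ...
 *	                                      |
 *	                                      +-> samples -> cpu_sample -> ...
 */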
struct per_pid {
	struct per_pid *next;

	int pid;
	int ppid;

	u64 start_time;
	u64 end_time;
	u64 total_time;
	int display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;

	int painted;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64 start_time;
	u64 end_time;
	u64 total_time;

	int Y;
	int display;

	long state;
	u64 state_since;

	char *comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64 timestamp;
	unsigned char data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct sample_wrapper *all_samples;


struct process_filter;
struct process_filter {
	char *name;
	int pid;
	struct process_filter *next;
};

static struct process_filter *process_filter;


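/*
 * Look up the per_pid entry for 'pid'; if there is none yet, allocate a
 * zeroed one and link it at the head of all_data.
 */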
static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

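/*
 * Record one sample of 'type' for 'pid' on 'cpu' covering [start, end].
 * A per_pidcomm is created on the fly if the pid has none yet; RUNNING
 * samples are also accumulated into the comm's and pid's total_time.
 */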
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;

	if (cpu > numcpus)
		numcpus = cpu;
}

#define MAX_CPUS 4096

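/*
 * Per-CPU bookkeeping for the C-state (idle) and P-state (frequency)
 * interval that is currently open, i.e. started but not yet ended.
 */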
static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int
process_comm_event(event_t *event)
{
	pid_set_comm(event->comm.pid, event->comm.comm);
	return 0;
}
static int
process_fork_event(event_t *event)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int
process_exit_event(event_t *event)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	u32 size;
	unsigned short type;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
	int tgid;
};

struct power_entry {
	struct trace_entry te;
	s64 type;
	s64 value;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int pid;
	int prio;
	int success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	 - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	 - inside an interrupt handler
 *  SOFTIRQ	 - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};



struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int prev_pid;
	int prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int next_pid;
	int next_prio;
};

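/*
 * A power:power_start tracepoint opens an idle (C-state) interval on a
 * CPU; power:power_end closes it and turns it into a CSTATE power_event
 * on the global list.
 */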
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

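/*
 * A power:power_frequency tracepoint closes out the CPU's previous
 * frequency interval as a PSTATE power_event and opens a new one at
 * new_freq. The lowest and highest frequencies seen are tracked; a value
 * exactly 1000 below the current maximum marks that maximum as the turbo
 * frequency.
 */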
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));

	if (new_freq > 8000000) /* detect invalid data */
		return;

	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

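/*
 * sched:sched_wakeup: record a wake event (waker is -1 when the wakeup
 * happens from hard or soft irq context) and move the wakee's current
 * comm to TYPE_WAITING, closing out a TYPE_BLOCKED sample if needed.
 */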
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

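/*
 * sched:sched_switch: close the RUNNING sample of the task leaving the
 * CPU and the waiting/blocked sample of the task entering it. The next
 * task becomes TYPE_RUNNING; the previous task becomes TYPE_BLOCKED if
 * it went to sleep uninterruptibly (prev_state & 2) or TYPE_WAITING if
 * it is still runnable (prev_state == 0).
 */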
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}


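/*
 * Decode one PERF_RECORD_SAMPLE: walk the sample array according to
 * sample_type to extract pid, timestamp and cpu, then dispatch the raw
 * tracepoint payload by event name.
 */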
static int
process_sample_event(event_t *event)
{
	int cursor = 0;
	u64 addr = 0;
	u64 stamp = 0;
	u32 cpu = 0;
	u32 pid = 0;
	struct trace_entry *te;

	if (sample_type & PERF_SAMPLE_IP)
		cursor++;

	if (sample_type & PERF_SAMPLE_TID) {
		pid = event->sample.array[cursor]>>32;
		cursor++;
	}
	if (sample_type & PERF_SAMPLE_TIME) {
		stamp = event->sample.array[cursor++];

		if (!first_time || first_time > stamp)
			first_time = stamp;
		if (last_time < stamp)
			last_time = stamp;

	}
	if (sample_type & PERF_SAMPLE_ADDR)
		addr = event->sample.array[cursor++];
	if (sample_type & PERF_SAMPLE_ID)
		cursor++;
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		cursor++;
	if (sample_type & PERF_SAMPLE_CPU)
		cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
	if (sample_type & PERF_SAMPLE_PERIOD)
		cursor++;

	te = (void *)&event->sample.array[cursor];

	if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(cpu, stamp, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(cpu, stamp);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(cpu, stamp, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(cpu, stamp, pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(cpu, stamp, te);
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

static u64 sample_time(event_t *event)
{
	int cursor;

	cursor = 0;
	if (sample_type & PERF_SAMPLE_IP)
		cursor++;
	if (sample_type & PERF_SAMPLE_TID)
		cursor++;
	if (sample_type & PERF_SAMPLE_TIME)
		return event->sample.array[cursor];
	return 0;
}


/*
 * We first queue all events, keeping the queue sorted newest-first as
 * they are inserted; the order gets flipped to chronological later.
 */
static int
queue_sample_event(event_t *event)
{
	struct sample_wrapper *copy, *prev;
	int size;

	size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;

	copy = malloc(size);
	if (!copy)
		return 1;

	memset(copy, 0, size);

	copy->next = NULL;
	copy->timestamp = sample_time(event);

	memcpy(&copy->data, event, event->sample.header.size);

	/* insert in the right place in the list */

	if (!all_samples) {
		/* first sample ever */
		all_samples = copy;
		return 0;
	}

	if (all_samples->timestamp < copy->timestamp) {
		/* insert at the head of the list */
		copy->next = all_samples;
		all_samples = copy;
		return 0;
	}

	prev = all_samples;
	while (prev->next) {
		if (prev->next->timestamp < copy->timestamp) {
			copy->next = prev->next;
			prev->next = copy;
			return 0;
		}
		prev = prev->next;
	}
	/* insert at the end of the list */
	prev->next = copy;

	return 0;
}

static void sort_queued_samples(void)
{
	struct sample_wrapper *cursor, *next;

	cursor = all_samples;
	all_samples = NULL;

	while (cursor) {
		next = cursor->next;
		cursor->next = all_samples;
		all_samples = cursor;
		cursor = next;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

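/*
 * Draw an arrow for every wake event: find the SVG rows (Y) of the waker
 * and the wakee, preferring a comm that was alive at the time of the
 * wakeup and falling back to any comm of that pid. Interrupt wakeups
 * (waker == -1) get a marker, adjacent rows a full wake line, everything
 * else a partial wake line labelled with the task names.
 */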
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

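/*
 * Process bars start below the per-CPU area (Y = 2 * numcpus + 2); every
 * displayed comm gets its own row and remembers it in c->Y, which
 * draw_wakeups() uses later to place the wake arrows.
 */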
static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

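/*
 * Tasks with less than TIME_THRESH (10 ms) of CPU time are normally not
 * drawn; if that leaves fewer than 15 tasks the selection is retried at
 * one tenth of the threshold (1 ms).
 */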
static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static int
process_event(event_t *event)
{

	switch (event->header.type) {

	case PERF_RECORD_COMM:
		return process_comm_event(event);
	case PERF_RECORD_FORK:
		return process_fork_event(event);
	case PERF_RECORD_EXIT:
		return process_exit_event(event);
	case PERF_RECORD_SAMPLE:
		return queue_sample_event(event);

	/*
	 * We don't process them right now but they are fine:
	 */
	case PERF_RECORD_MMAP:
	case PERF_RECORD_THROTTLE:
	case PERF_RECORD_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}

static void process_samples(void)
{
	struct sample_wrapper *cursor;
	event_t *event;

	sort_queued_samples();

	cursor = all_samples;
	while (cursor) {
		event = (void *)&cursor->data;
		cursor = cursor->next;
		process_sample_event(event);
	}
}


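/*
 * Main analysis pass: mmap the perf.data file one window at a time,
 * handling comm/fork/exit events as they are read and queueing the
 * samples, which are then replayed in time order to build the SVG.
 */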
static int __cmd_timechart(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head, shift;
	struct stat statbuf;
	event_t *event;
	uint32_t size;
	char *buf;
	int input;

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, " (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &statbuf);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!statbuf.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	header = perf_header__read(input);
	head = header->data_offset;

	sample_type = perf_header__sample_type(header);

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		int ret2;

		shift = page_size * (head / page_size);

		ret2 = munmap(buf, page_size * mmap_window);
		assert(ret2 == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event) < 0) {

		printf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope of catching on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= header->data_offset + header->data_size)
		goto done;

	if (offset + head < (unsigned long)statbuf.st_size)
		goto more;

done:
	rc = EXIT_SUCCESS;
	close(input);


	process_samples();

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name);

	return rc;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};

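/*
 * 'perf timechart record ...' simply runs 'perf record' system-wide with
 * the power and sched tracepoints listed above, plus whatever extra
 * arguments the user passed after "record".
 */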
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}