/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char const	*input_name = "perf.data";
static char		*vmlinux = NULL;

static char		default_sort_order[] = "comm,dso";
static char		*sort_order = default_sort_order;

static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)

static int		verbose;
static int		full_paths;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;

struct ip_event {
	struct perf_event_header header;
	__u64 ip;
	__u32 pid, tid;
};

struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	__u32 pid, ppid;
};

typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
	struct fork_event		fork;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

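/*
 * Look up a DSO by name in the global list; on first use create it and
 * load its symbols. Returns NULL if the DSO cannot be created or its
 * symbols fail to load.
 */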
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		if (verbose)
			fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr && verbose) {
		fprintf(stderr,
		"No symbols found in: %s, maybe install a debug package?\n",
				name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	return err;
}

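/*
 * Current working directory, used by map__new() to shorten mmap'ed
 * pathnames to cwd-relative ones unless --full-paths is given.
 */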
static char	__cwd[PATH_MAX];
static char	*cwd = __cwd;
static int	cwdlen;

static int strcommon(const char *pathname)
{
	int n = 0;

	while (pathname[n] == cwd[n] && n < cwdlen)
		++n;

	return n;
}

struct map {
	struct list_head node;
	uint64_t	 start;
	uint64_t	 end;
	uint64_t	 pgoff;
	struct dso	 *dso;
};

static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];

		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %lx-%lx %lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}


struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}


static struct rb_root threads;
static struct thread *last_match;

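/* Find the thread for a pid in the rbtree, creating it on first sight. */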
static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}

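/*
 * Insert a map into a thread's map list, dropping any existing maps
 * that overlap the new one.
 */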
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}

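/* Inherit the parent's comm and clone all of its maps into the child. */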
static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

static struct map *thread__find_map(struct thread *self, uint64_t ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node	 rb_node;

	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	uint64_t	 ip;
	char		 level;

	uint32_t	 count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "         Command:  Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "         Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	return fprintf(fp, "%016llx         ", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object            ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx  ", (__u64)self->ip);

	if (self->sym)
		ret += fprintf(fp, "%s", self->sym->name);
	else
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};

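/*
 * Set when a selected sort key provides a ->collapse method, in which
 * case the histogram gets an extra collapse pass before output.
 */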
static int sort__need_collapse = 0;

struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		double percent = self->count * 100.0 / total_samples;
		char *color = PERF_COLOR_NORMAL;

		/*
		 * We color high-overhead entries in red, low-overhead
		 * entries in green - and keep the middle ground normal:
		 */
		if (percent >= 5.0)
			color = PERF_COLOR_RED;
		if (percent < 0.5)
			color = PERF_COLOR_GREEN;

		ret = color_fprintf(fp, color, "   %6.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12d ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		fprintf(fp, "  ");
		ret += se->print(fp, self);
	}

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * collect histogram counts
 */

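/*
 * Add one sample to the histogram: bump the count of a matching entry,
 * or insert a new rbtree node keyed by the active sort entries.
 */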
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, uint64_t ip, char level)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= 1,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count++;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n);
	}
}

static size_t output__fprintf(FILE *fp, uint64_t total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list)
		fprintf(fp, "  %s", se->header);
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int i;

		fprintf(fp, "  ");
		for (i = 0; i < strlen(se->header); i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	if (!strcmp(sort_order, default_sort_order)) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}

static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0;

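/*
 * Process one overflow (sample) event: resolve the sample IP to a map,
 * DSO and symbol, then account it in the histogram.
 */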
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	uint64_t ip = event->ip.ip;
	struct map *map = NULL;

	dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip);

	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dprintf(" ...... dso: %s\n", dso->name);

	} else if (event->header.misc & PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			dso = map->dso;
			ip -= map->start + map->pgoff;
		} else {
			/*
			 * If this is outside of all known maps,
			 * and is a negative address, try to look it
			 * up in the kernel dso, as it might be a
			 * vsyscall (which executes in user-mode):
			 */
			if ((long long)ip < 0)
				dso = kernel_dso;
		}
		dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

	} else {
		show = SHOW_HV;
		level = 'H';
		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = dso__find_symbol(dso, ip);

		if (hist_entry__add(thread, map, dso, sym, ip, level)) {
			fprintf(stderr,
		"problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total++;

	return 0;
}

static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}

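/* Dispatch a single event record to the handler for its type. */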
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
		return process_overflow_event(event, offset, head);

	switch (event->header.type) {
	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);

	/*
	 * We don't process them right now but they are fine:
	 */

	case PERF_EVENT_PERIOD:
	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}

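/*
 * Main body of 'perf report': mmap the input file in sliding windows,
 * process every event record, then sort and print the histogram.
 */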
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		perror("failed to open file");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}
remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
	dprintf("    fork events: %10ld\n", total_fork);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();
	output__fprintf(stdout, total);

	return rc;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}

int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	return __cmd_report();
}