#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/sane_ctype.h"

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

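/*
 * Print the inlined call sites for @ip in @map, one line each, indented to
 * line up with the surrounding callchain graph (left margin plus one column
 * per depth level). Returns the number of characters written, or 0 when no
 * inline information is available for this address.
 */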
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	if (dso->kernel != DSO_TYPE_USER)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

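/*
 * Print one callchain entry as a single graph line: the left margin, a '|'
 * or blank for every depth level set in depth_mask, a "--<value>--" connector
 * on the first entry (period == 0) of a node, the symbol name and, optionally,
 * branch flag counts and inlined frames.
 */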
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

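/*
 * "[...]" placeholder entry used to account for the remaining (filtered out)
 * hits in relative graph mode; allocated by init_rem_hits() and freed at the
 * end of hists__fprintf().
 */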
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

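/*
 * Recursively print one level of the callchain graph. depth_mask tracks which
 * ancestor levels still need a '|' connector; in relative mode a trailing
 * rem_hits entry is printed for hits that fell below the filter.
 */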
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, unless we have
		 * remaining filtered hits; they will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the old depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode and the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

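/*
 * Print the callchain graph attached to a hist entry. A single root that does
 * not need its own percentage is printed inline after a "---" connector, and
 * the recursion then starts from its children.
 */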
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it, otherwise it appears
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

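/*
 * Flat mode helper: recurse on the parent first so the chain is printed from
 * the root down to this node, one symbol per line.
 */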
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);


	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

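/* Print every callchain in the tree in flat mode, one value line per chain. */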
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

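/*
 * Folded mode helper: print the chain root-to-leaf on a single line, with
 * symbols joined by field_sep (';' by default).
 */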
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
							bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

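/* Print every callchain in the tree in folded mode: "<value> sym;sym;...". */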
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

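/* Dispatch to the graph, flat or folded printer based on callchain_param.mode. */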
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

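/*
 * Format one hist entry into hpp->buf using the given format list, honoring
 * field_sep, per-column color and alignment. Returns the number of characters
 * written.
 */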
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

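/*
 * Hierarchy mode output: indent by the entry's depth, print the overhead
 * columns, then the entry's own (last) column left-aligned, followed by the
 * callchain for leaf entries.
 */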
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

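/*
 * Print one formatted hist entry line, then its callchain and/or inlined
 * frames depending on use_callchain and symbol_conf.inline_name.
 */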
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf	= bf,
		.size	= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

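/*
 * Print the header lines used in hierarchy mode: the overhead columns
 * followed by the sort keys joined with " / ", then a matching line of dots.
 * Returns the number of header lines printed.
 */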
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

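/*
 * Print a single header line; while a multi-column span is in effect
 * (span != 0) the separator and the per-column buffer are not printed.
 */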
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

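/*
 * Print the column headers, in hierarchy or standard layout, and return the
 * number of header rows written.
 */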
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);

}

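/*
 * Print the whole hists tree: optional headers, then every unfiltered entry
 * at or above min_pcnt, stopping after max_rows rows. ret is set to -1 if the
 * per-line buffer cannot be allocated.
 */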
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

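/* Print a "<name> events: <count>" line for each event type with a non-zero count. */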
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}