#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/sane_ctype.h"

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

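/*
 * Print the inlined frames for a user-space map/ip, one "(inline)" line per
 * entry, indented to line up with the surrounding callchain graph.  Returns
 * the number of characters written, or 0 if the map/dso is missing or the
 * dso is not a user-space one.
 */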
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	if (dso->kernel != DSO_TYPE_USER)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

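/*
 * Print one callchain_list entry of a graph-mode call chain: the leading
 * '|' pipes for each active depth level, the node's value on the first line
 * of a new branch, the symbol name and, optionally, the branch flag counts
 * and any inlined frames.
 */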
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

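/*
 * Recursively print one level of a graph-mode call chain: each child node,
 * the separator lines between siblings and, in relative mode, a trailing
 * "[...]" pseudo entry accounting for the remaining (filtered) hits.
 */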
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of the pipes that show
		 * the depth.  We don't want to keep the pipes of the current
		 * level for the last child of this depth, unless there are
		 * remaining filtered hits: they will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * We do keep the old depth mask for the line separator,
		 * so the level link is preserved until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode and the same percentage
 * as the hist entry in graph mode).  This also avoids one level of column.
 *
 * However, when a percent limit is applied, a single callchain node
 * may have a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

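/*
 * Top-level printer for graph-mode call chains.  When the tree has a single
 * root that needs no percentage of its own, its entries are printed inline
 * (the first one prefixed with "---") and the recursion starts from its
 * children.
 */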
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol.  No need to print it, otherwise it
			 * appears displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

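/*
 * Flat mode: print each callchain entry on its own line, recursing to the
 * parent node first so the chain is printed root-first.
 */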
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

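/*
 * Folded mode: print the whole chain on a single line, entries joined by the
 * field separator (';' by default), again recursing to the parent node first.
 */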
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

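/*
 * Dispatch to the configured callchain printing mode (graph relative or
 * absolute, flat, or folded) for one hist entry.  parent_samples is the
 * entry's own period, or its accumulated period when children are cumulated.
 */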
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

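/*
 * Format one hist entry into hpp->buf by running every configured column
 * format (colored when supported), separated by symbol_conf.field_sep or a
 * space.  Returns the number of characters written.
 */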
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

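/*
 * Hierarchy mode: print the overhead columns of one entry, indented by its
 * depth, followed by its own sort key column(s) and, for leaf entries, the
 * call chain.
 */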
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

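/*
 * Print one formatted hist entry line (or its hierarchy-mode variant),
 * followed by its call chain and/or inlined frames when those are enabled.
 */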
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
}

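/*
 * Print the whole hists tree to fp: optional headers, then every unfiltered
 * entry above min_pcnt, stopping once max_rows lines have been printed when
 * a limit is set.  Returns the number of characters written (-1 on
 * allocation failure).
 */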
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

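/* Print a summary line for every recognized, non-zero event record type. */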
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}