#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/sane_ctype.h"

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

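/*
 * Print the inlined call frames for @ip, one per line, preserving the
 * graph's '|' columns via depth/depth_mask. Only user-space DSOs are
 * handled; kernel maps return 0 without printing anything.
 */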
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	if (dso->kernel != DSO_TYPE_USER)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}

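/*
 * Print a single callchain entry: the '|' columns for the current depth,
 * the "--value--" marker for the first entry of a branch, the resolved
 * symbol name and, optionally, branch counts and inlined frames.
 */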
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", " ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

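/*
 * Recursively print a callchain rbtree in graph mode. depth_mask tracks
 * which columns still need a '|' connector; remaining (filtered) hits are
 * accounted to a synthetic "[...]" entry in CHAIN_GRAPH_REL mode.
 */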
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage
 * as the hist entry in graph mode). This also avoids one level of column.
 *
 * However, when a percent limit is applied, it's possible that the single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it, otherwise it would
			 * be displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

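/*
 * Flat mode: print the chain from the root down by recursing to the
 * parent first, one indented symbol per line.
 */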
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

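/*
 * Folded mode: print the whole chain on a single line, entries joined by
 * the field separator (';' by default), suitable for flame graph scripts.
 */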
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

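/*
 * Dispatch to the graph/flat/folded callchain printers according to
 * callchain_param.mode.
 */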
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

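/*
 * Format one hist entry into hpp->buf using the given format list,
 * honoring the field separator and per-column color/entry callbacks.
 */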
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

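/*
 * Hierarchy mode: print the overhead columns, indent by the entry's depth
 * and then print only the sort key owned by this level.
 */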
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

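/*
 * Print a single hist entry line, then its callchain and/or inlined
 * frames when requested.
 */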
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

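/*
 * Print the two header lines for hierarchy mode: overhead columns plus
 * the sort keys combined with ' / ', then a matching line of dots.
 */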
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;	/* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);

}

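/*
 * Top level stdio output: print the headers and then every hist entry
 * that survives filtering and the percent limit, up to max_rows.
 */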
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

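/*
 * Print a per-event-type count of the records seen in the session,
 * skipping types with no events.
 */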
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}