blob: 6128f485a3c502927d991b222913723578360580 [file] [log] [blame]
Namhyung Kim7ccf4f92012-08-20 13:52:05 +09001#include <stdio.h>
Namhyung Kim7ccf4f92012-08-20 13:52:05 +09002
3#include "../../util/util.h"
4#include "../../util/hist.h"
5#include "../../util/sort.h"
Namhyung Kim5b9e2142013-01-22 18:09:37 +09006#include "../../util/evsel.h"
Namhyung Kim7ccf4f92012-08-20 13:52:05 +09007
8
/*
 * Emit the leading blank column for a callchain line: one mandatory
 * space followed by @left_margin additional spaces.  Returns the
 * number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	size_t printed;
	int col;

	/* The single leading space is always printed, even for margin 0. */
	printed = fprintf(fp, " ");
	for (col = 0; col < left_margin; col++)
		printed += fprintf(fp, " ");

	return printed;
}
19
/*
 * Print the inlined call frames resolved for address @ip in @map, one
 * line per inline entry, aligned under the callchain graph using
 * @left_margin, @depth and @depth_mask.  Returns the number of
 * characters printed; 0 when no inline info applies.
 */
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	/* Inline resolution is only attempted for user-space DSOs. */
	if (dso->kernel != DSO_TYPE_USER)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		/* Only print entries that carry at least a file or a function. */
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			/* Reproduce the '|' pipes of the enclosing graph level. */
			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, " ");
			}

			/* Address-keyed callchains prefer file:line over the name. */
			if (callchain_param.key == CCKEY_ADDRESS) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}
79
/*
 * Print one separator line of the callchain graph: the left margin
 * followed by a '|' cell for every depth level still open in
 * @depth_mask, then a newline.  Returns the characters written.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);
	int level;

	for (level = 0; level < depth; level++) {
		const char *cell = (depth_mask & (1 << level)) ? "| " : " ";

		printed += fprintf(fp, "%s", cell);
	}

	printed += fprintf(fp, "\n");

	return printed;
}
96
Namhyung Kim5ab250c2015-11-09 14:45:39 +090097static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
98 struct callchain_list *chain,
Namhyung Kim7ccf4f92012-08-20 13:52:05 +090099 int depth, int depth_mask, int period,
Namhyung Kim5ab250c2015-11-09 14:45:39 +0900100 u64 total_samples, int left_margin)
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900101{
102 int i;
103 size_t ret = 0;
Jin Yao8577ae62016-10-31 09:19:52 +0800104 char bf[1024], *alloc_str = NULL;
105 char buf[64];
106 const char *str;
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900107
108 ret += callchain__fprintf_left_margin(fp, left_margin);
109 for (i = 0; i < depth; i++) {
110 if (depth_mask & (1 << i))
111 ret += fprintf(fp, "|");
112 else
113 ret += fprintf(fp, " ");
114 if (!period && i == depth - 1) {
Namhyung Kim5ab250c2015-11-09 14:45:39 +0900115 ret += fprintf(fp, "--");
116 ret += callchain_node__fprintf_value(node, fp, total_samples);
117 ret += fprintf(fp, "--");
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900118 } else
119 ret += fprintf(fp, "%s", " ");
120 }
Jin Yao8577ae62016-10-31 09:19:52 +0800121
122 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
123
124 if (symbol_conf.show_branchflag_count) {
125 if (!period)
126 callchain_list_counts__printf_value(node, chain, NULL,
127 buf, sizeof(buf));
128 else
129 callchain_list_counts__printf_value(NULL, chain, NULL,
130 buf, sizeof(buf));
131
132 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
133 str = "Not enough memory!";
134 else
135 str = alloc_str;
136 }
137
138 fputs(str, fp);
Andi Kleen2989cca2014-11-12 18:05:23 -0800139 fputc('\n', fp);
Jin Yao8577ae62016-10-31 09:19:52 +0800140 free(alloc_str);
Jin Yao0db64dd2017-03-26 04:34:28 +0800141
142 if (symbol_conf.inline_name)
143 ret += inline__fprintf(chain->ms.map, chain->ip,
144 left_margin, depth, depth_mask, fp);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900145 return ret;
146}
147
/* Synthetic "[...]" symbol used to represent remaining (filtered) hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate and initialize the "[...]" placeholder symbol once.  On
 * allocation failure a warning is printed and rem_sq_bracket stays
 * NULL, which callers check before using rem_hits.
 */
static void init_rem_hits(void)
{
	/* +6 leaves room to copy "[...]" plus NUL into the trailing name[]. */
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
162
/*
 * Recursively print one level of the callchain graph rooted at @root,
 * then each child's own subtree, stopping early at
 * callchain_param.print_limit entries per level.  @depth_mask records
 * which ancestor levels still need a '|' pipe drawn on later lines.
 * Returns the number of characters printed.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* Relative mode scales children against this child's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		/* Recurse into this child's subtree, one level deeper. */
		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		/* Synthetic node representing hits filtered out above. */
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* init_rem_hits() may have failed; nothing to show then. */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
251
Namhyung Kim7ed5d6e2016-01-28 00:40:53 +0900252/*
253 * If have one single callchain root, don't bother printing
254 * its percentage (100 % in fractal mode and the same percentage
255 * than the hist in graph mode). This also avoid one level of column.
256 *
257 * However when percent-limit applied, it's possible that single callchain
258 * node have different (non-100% in fractal mode) percentage.
259 */
260static bool need_percent_display(struct rb_node *node, u64 parent_samples)
261{
262 struct callchain_node *cnode;
263
264 if (rb_next(node))
265 return true;
266
267 cnode = rb_entry(node, struct callchain_node, rb_node);
268 return callchain_cumul_hits(cnode) != parent_samples;
269}
270
/*
 * Print the callchain graph for one hist entry.  When a single root
 * covers the whole parent, its entries are printed inline behind a
 * "---" connector (no percentage column) and the graph proper starts
 * from that root's children.  Returns characters printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			/* First printed entry opens the "---" connector. */
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		/* Continue the graph from the single root's children. */
		root = &cnode->rb_root;
	}

	/* Relative mode computes percentages against the parent's period. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
342
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300343static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900344 u64 total_samples)
345{
346 struct callchain_list *chain;
347 size_t ret = 0;
Andi Kleen2989cca2014-11-12 18:05:23 -0800348 char bf[1024];
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900349
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300350 if (!node)
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900351 return 0;
352
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300353 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900354
355
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300356 list_for_each_entry(chain, &node->val, list) {
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900357 if (chain->ip >= PERF_CONTEXT_MAX)
358 continue;
Andi Kleen2989cca2014-11-12 18:05:23 -0800359 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
360 bf, sizeof(bf), false));
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900361 }
362
363 return ret;
364}
365
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300366static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900367 u64 total_samples)
368{
369 size_t ret = 0;
370 u32 entries_printed = 0;
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900371 struct callchain_node *chain;
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -0300372 struct rb_node *rb_node = rb_first(tree);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900373
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900374 while (rb_node) {
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900375 chain = rb_entry(rb_node, struct callchain_node, rb_node);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900376
Namhyung Kim5ab250c2015-11-09 14:45:39 +0900377 ret += fprintf(fp, " ");
378 ret += callchain_node__fprintf_value(chain, fp, total_samples);
379 ret += fprintf(fp, "\n");
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900380 ret += __callchain__fprintf_flat(fp, chain, total_samples);
381 ret += fprintf(fp, "\n");
382 if (++entries_printed == callchain_param.print_limit)
383 break;
384
385 rb_node = rb_next(rb_node);
386 }
387
388 return ret;
389}
390
Namhyung Kim26e77922015-11-09 14:45:37 +0900391static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
392{
393 const char *sep = symbol_conf.field_sep ?: ";";
394 struct callchain_list *chain;
395 size_t ret = 0;
396 char bf[1024];
397 bool first;
398
399 if (!node)
400 return 0;
401
402 ret += __callchain__fprintf_folded(fp, node->parent);
403
404 first = (ret == 0);
405 list_for_each_entry(chain, &node->val, list) {
406 if (chain->ip >= PERF_CONTEXT_MAX)
407 continue;
408 ret += fprintf(fp, "%s%s", first ? "" : sep,
409 callchain_list__sym_name(chain,
410 bf, sizeof(bf), false));
411 first = false;
412 }
413
414 return ret;
415}
416
417static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
418 u64 total_samples)
419{
420 size_t ret = 0;
421 u32 entries_printed = 0;
422 struct callchain_node *chain;
423 struct rb_node *rb_node = rb_first(tree);
424
425 while (rb_node) {
Namhyung Kim26e77922015-11-09 14:45:37 +0900426
427 chain = rb_entry(rb_node, struct callchain_node, rb_node);
Namhyung Kim26e77922015-11-09 14:45:37 +0900428
Namhyung Kim5ab250c2015-11-09 14:45:39 +0900429 ret += callchain_node__fprintf_value(chain, fp, total_samples);
430 ret += fprintf(fp, " ");
Namhyung Kim26e77922015-11-09 14:45:37 +0900431 ret += __callchain__fprintf_folded(fp, chain);
432 ret += fprintf(fp, "\n");
433 if (++entries_printed == callchain_param.print_limit)
434 break;
435
436 rb_node = rb_next(rb_node);
437 }
438
439 return ret;
440}
441
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900442static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
443 u64 total_samples, int left_margin,
444 FILE *fp)
445{
Namhyung Kim54d27b32016-01-28 00:40:52 +0900446 u64 parent_samples = he->stat.period;
447
448 if (symbol_conf.cumulate_callchain)
449 parent_samples = he->stat_acc->period;
450
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900451 switch (callchain_param.mode) {
452 case CHAIN_GRAPH_REL:
Namhyung Kim54d27b32016-01-28 00:40:52 +0900453 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
454 parent_samples, left_margin);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900455 break;
456 case CHAIN_GRAPH_ABS:
457 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
Namhyung Kim54d27b32016-01-28 00:40:52 +0900458 parent_samples, left_margin);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900459 break;
460 case CHAIN_FLAT:
461 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
462 break;
Namhyung Kim26e77922015-11-09 14:45:37 +0900463 case CHAIN_FOLDED:
464 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
465 break;
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900466 case CHAIN_NONE:
467 break;
468 default:
469 pr_err("Bad callchain mode\n");
470 }
471
472 return 0;
473}
474
Jiri Olsabd28d0c2016-09-22 17:36:36 +0200475int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
476 struct perf_hpp_list *hpp_list)
Jiri Olsabe0e6d12013-02-04 16:33:19 +0100477{
478 const char *sep = symbol_conf.field_sep;
479 struct perf_hpp_fmt *fmt;
480 char *start = hpp->buf;
481 int ret;
482 bool first = true;
483
484 if (symbol_conf.exclude_other && !he->parent)
485 return 0;
486
Jiri Olsa9da44db2016-09-22 17:36:29 +0200487 perf_hpp_list__for_each_format(hpp_list, fmt) {
Namhyung Kim361459f2015-12-23 02:07:08 +0900488 if (perf_hpp__should_skip(fmt, he->hists))
Namhyung Kime67d49a2014-03-18 13:00:59 +0900489 continue;
490
Jiri Olsabe0e6d12013-02-04 16:33:19 +0100491 /*
492 * If there's no field_sep, we still need
493 * to display initial ' '.
494 */
495 if (!sep || !first) {
496 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
497 advance_hpp(hpp, ret);
498 } else
499 first = false;
500
Jiri Olsa9754c4f2013-10-25 13:24:53 +0200501 if (perf_hpp__use_color() && fmt->color)
Jiri Olsabe0e6d12013-02-04 16:33:19 +0100502 ret = fmt->color(fmt, hpp, he);
503 else
504 ret = fmt->entry(fmt, hpp, he);
505
Arnaldo Carvalho de Melo89fee702016-02-11 17:14:13 -0300506 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
Jiri Olsabe0e6d12013-02-04 16:33:19 +0100507 advance_hpp(hpp, ret);
508 }
509
510 return hpp->buf - start;
511}
512
/* Format @he with its hists' default format list into @hpp. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
517
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: depth-based
 * indentation, the shared overhead columns, this entry's own sort
 * column(s), then its callchain when it is a leaf.  Returns the number
 * of characters printed.
 */
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	/* Indent proportionally to this entry's depth in the hierarchy. */
	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	/*
	 * NOTE(review): when sep is set, advance_hpp() below reuses the
	 * stale 'ret' from the last loop iteration — presumably harmless
	 * since separator output is not column-aligned; verify.
	 */
	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		/* Reuse the whole buffer for each of this entry's own columns. */
		hpp->buf = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
	}
	/*
	 * NOTE(review): putc() returns the character written, so this adds
	 * 10 ('\n'), not 1, to the printed count — confirm intent.
	 */
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;	/* jumps to the next statement; kept for symmetry */
	}

out:
	return printed;
}
598
/*
 * Print a single hist entry line, formatted into @bf, to @fp —
 * optionally followed by its callchain and/or inlined frames.
 * Returns the number of characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	/* Fall back to the full buffer when size is unset or too large. */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	/* Hierarchy mode has its own dedicated printing path. */
	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	/* Print inline frames here only when the callchain printed nothing. */
	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}
637
Namhyung Kim2dbbe9f2016-03-07 16:44:48 -0300638static int print_hierarchy_indent(const char *sep, int indent,
Namhyung Kim8e2fc442016-02-25 00:13:42 +0900639 const char *line, FILE *fp)
640{
Namhyung Kim2dbbe9f2016-03-07 16:44:48 -0300641 if (sep != NULL || indent < 2)
Namhyung Kim8e2fc442016-02-25 00:13:42 +0900642 return 0;
643
Namhyung Kim2dbbe9f2016-03-07 16:44:48 -0300644 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
Namhyung Kim8e2fc442016-02-25 00:13:42 +0900645}
646
/*
 * Print the header block for --hierarchy output: the overhead column
 * titles, the sort-key titles joined with " / ", then a dotted
 * underline sized to the widest hierarchy level.  Always returns 2,
 * the number of header lines emitted.
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		/* multiple formats in one level are joined with '+' */
		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	/* underline each overhead column with dots of its own width */
	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/*
	 * Second pass: compute the widest sort level (indent plus column
	 * widths plus '+' joiners) so the underline covers every level.
	 */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
742
/*
 * Print header line @line for every configured format.  A format's
 * header() may set 'span' to claim several columns (e.g. an event
 * group header); while span is nonzero the separator and the buffered
 * text are suppressed until the spanned columns are consumed.
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900766
/*
 * Print the standard (non-hierarchy) column headers: one or more title
 * lines followed by a dotted underline.  With an explicit field
 * separator the underline is omitted.  Returns the number of header
 * lines emitted.
 */
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	/* Separator mode: no dotted underline. */
	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		/* Underline each column with dots matching its width. */
		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	/* Title lines plus the underline and the trailing "#" line. */
	return hpp_list->nr_header_lines + 2;
}
814
Jiri Olsa2d831452016-09-22 17:36:37 +0200815int hists__fprintf_headers(struct hists *hists, FILE *fp)
Jiri Olsa7a72a2e2016-06-14 20:19:16 +0200816{
Jiri Olsad5278222016-09-19 15:09:13 +0200817 char bf[1024];
Jiri Olsa7a72a2e2016-06-14 20:19:16 +0200818 struct perf_hpp dummy_hpp = {
819 .buf = bf,
820 .size = sizeof(bf),
821 };
822
823 fprintf(fp, "# ");
824
825 if (symbol_conf.report_hierarchy)
826 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
827 else
828 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
829
830}
831
/*
 * Print all (unfiltered, above-limit) hist entries to @fp.
 *
 * @show_header:   emit the column header block first.
 * @max_rows:      stop after this many output rows (0 = unlimited);
 *                 header rows count against the limit.
 * @max_cols:      column clamp passed through to hist_entry__fprintf().
 * @min_pcnt:      percent limit; entries below it are skipped.
 * @use_callchain: forwarded to the per-entry printer.
 *
 * Returns the number of characters printed, or (size_t)-1 when the
 * line buffer allocation fails.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	/* Recompute column widths from scratch for this print pass. */
	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	/* The header alone may already exhaust the row budget. */
	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* Scratch line buffer: sort-key width plus slack and NUL ... */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	/* ... plus room for ANSI color escape sequences. */
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;	/* NOTE: wraps to (size_t)-1 for the caller */
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	/* Walk entries in sort order, descending into hierarchy children. */
	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* Unresolved entry: dump the thread's maps to aid debugging. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	/* Release the "[...]" placeholder string set up by init_rem_hits(). */
	zfree(&rem_sq_bracket);

	return ret;
}
910
Arnaldo Carvalho de Melo52168ee2012-12-18 16:02:17 -0300911size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900912{
913 int i;
914 size_t ret = 0;
915
916 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
917 const char *name;
918
Arnaldo Carvalho de Melo52168ee2012-12-18 16:02:17 -0300919 if (stats->nr_events[i] == 0)
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900920 continue;
921
922 name = perf_event__name(i);
923 if (!strcmp(name, "UNKNOWN"))
924 continue;
925
926 ret += fprintf(fp, "%16s events: %10d\n", name,
Arnaldo Carvalho de Melo52168ee2012-12-18 16:02:17 -0300927 stats->nr_events[i]);
Namhyung Kim7ccf4f92012-08-20 13:52:05 +0900928 }
929
930 return ret;
931}