#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"

/* hist period print (hpp) functions */

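/*
 * Call @fn to print into the hpp buffer and advance hpp->buf/hpp->size
 * past what was written, so consecutive columns land next to each other.
 */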
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

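/*
 * Format a single column value for @he.  When @fmt_percent is true the
 * field is printed as a percentage of the total period of its hists,
 * otherwise the raw value is printed.
 */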
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
	       u64 (*get_field)(struct hist_entry *),
	       const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;

		if (hists->stats.total_period)
			percent = 100.0 * get_field(he) /
				  hists->stats.total_period;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));

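	/*
	 * For an event group, also print the value of every other group
	 * member (in group index order), zero-filling members that have
	 * no samples so the columns stay aligned.
	 */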
	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = pair->hists->stats.total_period;

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

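/*
 * __HPP_HEADER_FN() emits hpp__header_<type>(), which prints the column
 * title right-aligned, and __HPP_WIDTH_FN() emits hpp__width_<type>(),
 * which reports the column width.  Both use at least _min_width, widened
 * to _unit_width per group member when event grouping is enabled.
 */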
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width)		\
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
			       struct perf_hpp *hpp)			\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group) {					\
		struct perf_evsel *evsel = hpp->ptr;			\
									\
		len = max(len, evsel->nr_members * _unit_width);	\
	}								\
	return scnprintf(hpp->buf, hpp->size, "%*s", len, _str);	\
}

#define __HPP_WIDTH_FN(_type, _min_width, _unit_width)			\
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp __maybe_unused)	\
{									\
	int len = _min_width;						\
									\
	if (symbol_conf.event_group) {					\
		struct perf_evsel *evsel = hpp->ptr;			\
									\
		len = max(len, evsel->nr_members * _unit_width);	\
	}								\
	return len;							\
}

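/*
 * Buffer-printing helpers passed to __hpp__fmt() as print_fn.  The color
 * variant pulls the percentage out of the varargs and routes it through
 * value_color_snprintf() so hot entries can be highlighted; the plain
 * variant is a straight vsnprintf().  Both clamp the return value to the
 * buffer size, since *snprintf() reports the length that would have been
 * written on truncation.
 */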
static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret;

	va_start(args, fmt);
	percent = va_arg(args, double);
	ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

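/*
 * Generators for the per-column callbacks: a he_get_<field>() accessor
 * plus hpp__color_<type>() / hpp__entry_<type>() wrappers that feed it
 * to __hpp__fmt().  Percent columns use " %6.2f%%" (or a bare " %.2f"
 * when a field separator is set); raw columns print the value as u64.
 */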
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%",	\
			  hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%";	\
	return __hpp__fmt(hpp, he, he_get_##_field, fmt,		\
			  hpp_entry_scnprintf, true);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
	return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt,		\
			  hpp_entry_scnprintf, false);			\
}

#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)

#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width)	\
__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)			\
__HPP_WIDTH_FN(_type, _min_width, _unit_width)				\
__HPP_ENTRY_RAW_FN(_type, _field)


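/*
 * Instantiate the actual columns.  For example, HPP_PERCENT_FNS(overhead,
 * "Overhead", period, 8, 8) generates hpp__header_overhead(),
 * hpp__width_overhead(), hpp__color_overhead() and hpp__entry_overhead(),
 * all reading he->stat.period.
 */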
HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)

HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)

#define HPP__COLOR_PRINT_FNS(_name)			\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.color	= hpp__color_ ## _name,		\
		.entry	= hpp__entry_ ## _name		\
	}

#define HPP__PRINT_FNS(_name)				\
	{						\
		.header	= hpp__header_ ## _name,	\
		.width	= hpp__width_ ## _name,		\
		.entry	= hpp__entry_ ## _name		\
	}

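/*
 * Table of all known column formats, indexed by the PERF_HPP__* enum, and
 * the list of columns that are currently enabled.  Output code iterates
 * the enabled formats roughly like (sketch):
 *
 *	perf_hpp__for_each_format(fmt) {
 *		if (use_color && fmt->color)
 *			fmt->color(fmt, &hpp, he);
 *		else
 *			fmt->entry(fmt, &hpp, he);
 *	}
 */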
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS(overhead),
	HPP__COLOR_PRINT_FNS(overhead_sys),
	HPP__COLOR_PRINT_FNS(overhead_us),
	HPP__COLOR_PRINT_FNS(overhead_guest_sys),
	HPP__COLOR_PRINT_FNS(overhead_guest_us),
	HPP__PRINT_FNS(samples),
	HPP__PRINT_FNS(period)
};

LIST_HEAD(perf_hpp__list);


#undef HPP__COLOR_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN


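/*
 * Enable the default set of columns: "Overhead" is always shown, the
 * remaining ones depend on the command line / config (cpu utilization
 * split, guest mode, sample count, total period).
 */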
void perf_hpp__init(void)
{
	perf_hpp__column_enable(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
		perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
			perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		perf_hpp__column_enable(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		perf_hpp__column_enable(PERF_HPP__PERIOD);
}

void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &perf_hpp__list);
}

void perf_hpp__column_enable(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	perf_hpp__column_register(&perf_hpp__format[col]);
}

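/*
 * Append the sort-key columns (comm, dso, symbol, ...) for @he to @s,
 * skipping elided keys and separating columns with the configured field
 * separator, or two spaces by default.
 */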
int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
			      struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct sort_entry *se;
	int i = 0, ret = 0;
	struct perf_hpp dummy_hpp = {
		.ptr	= hists_to_evsel(hists),
	};

	perf_hpp__for_each_format(fmt) {
		if (i)
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp);
	}

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}