#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
int sort__has_thread = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

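/*
 * Like vsnprintf() into bf, except that when a field separator is in
 * use (symbol_conf.field_sep, e.g. 'perf report -t'), any occurrence
 * of that separator character inside the formatted field is replaced
 * with '.' so the field itself cannot break the column layout.
 */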
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

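/*
 * Fallback comparison for keys that may be NULL: two NULL keys compare
 * equal, otherwise the NULL key is consistently ordered to one side.
 */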
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

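/*
 * Each sort key below is described by a struct sort_entry: se_cmp
 * groups hist entries, se_collapse (when set) is used while collapsing
 * entries, se_sort (when set) orders the final output, se_snprintf
 * formats the column text, and se_width_idx names the column width
 * slot tracked per hists.
 */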
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

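/*
 * Symbol sorting: when neither entry has a resolved symbol, compare the
 * raw sample addresses; otherwise compare by DSO first (unless 'dso' is
 * already a sort key) and then by symbol start/end address, since a
 * symbol's address is only meaningful relative to its DSO.
 */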
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline) {
		if (!left->ms.map)
			left->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = left->ms.map;
			left->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, left->ip),
					left->ms.sym, true);
		}
	}
	if (!right->srcline) {
		if (!right->ms.map)
			right->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = right->ms.map;
			right->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, right->ip),
					right->ms.sym, true);
		}
	}
	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile) {
		if (!left->ms.map)
			left->srcfile = no_srcfile;
		else
			left->srcfile = get_srcfile(left);
	}
	if (!right->srcfile) {
		if (!right->ms.map)
			right->srcfile = no_srcfile;
		else
			right->srcfile = get_srcfile(right);
	}
	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort trace */

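/*
 * For tracepoint events the 'trace' key sorts and displays the event's
 * formatted payload: the raw data stored in the hist entry is run
 * through libtraceevent, printing either every field (--raw-trace) or
 * the event's print format.
 */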
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

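/*
 * Branch stack keys operate on he->branch_info, which records the
 * source ('from') and target ('to') of each sampled branch.  Entries
 * without branch info compare via cmp_null() and display as "N/A".
 */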
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

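/*
 * Memory access ('mem') sort keys below key off he->mem_info, which
 * carries the data address, the instruction address and the decoded
 * perf_mem_data_src bits (lock, TLB, cache level, snoop) of each
 * memory sample.
 */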
/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

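/*
 * The 'dcacheline' key groups samples that touched the same data cache
 * line: entries are ordered by cpumode, then by the identity of the
 * backing map (maj/min/ino/generation, plus pid for anonymous userspace
 * mappings), and finally by the cache-line-aligned address.
 */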
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

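/*
 * 'local_weight' reports the average weight per sample of an entry
 * (e.g. memory access latency), while 'weight' below reports the
 * weight summed over all of the entry's samples.
 */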
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

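/*
 * The 'Transaction' column width is derived from the names in txbits[]
 * above, skipping entries flagged skip_for_len, plus room for the
 * ":XX " abort code suffix.
 */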
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

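/*
 * The dimension tables below map the key names accepted in the sort
 * and field order strings (--sort / --fields) onto their sort_entry or
 * perf_hpp format implementations: common keys, branch-stack keys,
 * memory-mode keys and the hpp output columns each live in their own
 * table.
 */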
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

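/*
 * A hpp_sort_entry wraps a classic sort_entry in a perf_hpp_fmt so a
 * sort key can also act as an output column: the __sort__hpp_*
 * callbacks below forward header, width, formatting and comparison
 * requests to the embedded sort_entry.
 */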
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;

	return hse;
1578}
1579
Jiri Olsa1945c3e2016-01-18 10:24:07 +01001580static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1581{
1582 struct perf_hpp_fmt *fmt;
1583
1584 fmt = memdup(hd->fmt, sizeof(*fmt));
1585 if (fmt) {
1586 INIT_LIST_HEAD(&fmt->list);
1587 INIT_LIST_HEAD(&fmt->sort_list);
1588 }
1589
1590 return fmt;
1591}
1592
Namhyung Kima7d945b2014-03-04 10:46:34 +09001593static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1594{
1595 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1596
1597 if (hse == NULL)
1598 return -1;
1599
Namhyung Kim8b536992014-03-03 11:46:55 +09001600 perf_hpp__register_sort_field(&hse->hpp);
1601 return 0;
1602}
1603
Namhyung Kima7d945b2014-03-04 10:46:34 +09001604static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1605{
1606 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1607
1608 if (hse == NULL)
1609 return -1;
1610
1611 perf_hpp__column_register(&hse->hpp);
1612 return 0;
1613}
1614
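/*
 * A dynamically generated column built from one tracepoint format field
 * of one evsel; raw_trace selects the raw field value instead of the
 * pretty-printed one.
 */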
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001615struct hpp_dynamic_entry {
1616 struct perf_hpp_fmt hpp;
1617 struct perf_evsel *evsel;
1618 struct format_field *field;
1619 unsigned dynamic_len;
Namhyung Kim053a3982015-12-23 02:07:05 +09001620 bool raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001621};
1622
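/*
 * Column width for a dynamic entry: the largest of the length seen so far
 * in the samples, the field name length and the raw field size (non-string
 * fields are printed as hex, i.e. two characters per byte plus "0x").
 */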
1623static int hde_width(struct hpp_dynamic_entry *hde)
1624{
1625 if (!hde->hpp.len) {
1626 int len = hde->dynamic_len;
1627 int namelen = strlen(hde->field->name);
1628 int fieldlen = hde->field->size;
1629
1630 if (namelen > len)
1631 len = namelen;
1632
1633 if (!(hde->field->flags & FIELD_IS_STRING)) {
/* length for printing hex numbers */
1635 fieldlen = hde->field->size * 2 + 2;
1636 }
1637 if (fieldlen > len)
1638 len = fieldlen;
1639
1640 hde->hpp.len = len;
1641 }
1642 return hde->hpp.len;
1643}
1644
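/*
 * he->trace_output holds the pretty-printed event (scanned here as
 * space-separated "name=value"-style pairs); look for this field and
 * remember the widest value seen so the column can be sized to fit.
 */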
Namhyung Kim60517d22015-12-23 02:07:03 +09001645static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1646 struct hist_entry *he)
1647{
1648 char *str, *pos;
1649 struct format_field *field = hde->field;
1650 size_t namelen;
1651 bool last = false;
1652
Namhyung Kim053a3982015-12-23 02:07:05 +09001653 if (hde->raw_trace)
1654 return;
1655
Namhyung Kim60517d22015-12-23 02:07:03 +09001656 /* parse pretty print result and update max length */
1657 if (!he->trace_output)
1658 he->trace_output = get_trace_output(he);
1659
1660 namelen = strlen(field->name);
1661 str = he->trace_output;
1662
1663 while (str) {
1664 pos = strchr(str, ' ');
1665 if (pos == NULL) {
1666 last = true;
1667 pos = str + strlen(str);
1668 }
1669
1670 if (!strncmp(str, field->name, namelen)) {
1671 size_t len;
1672
1673 str += namelen + 1;
1674 len = pos - str;
1675
1676 if (len > hde->dynamic_len)
1677 hde->dynamic_len = len;
1678 break;
1679 }
1680
1681 if (last)
1682 str = NULL;
1683 else
1684 str = pos + 1;
1685 }
1686}
1687
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001688static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1689 struct perf_evsel *evsel __maybe_unused)
1690{
1691 struct hpp_dynamic_entry *hde;
1692 size_t len = fmt->user_len;
1693
1694 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1695
1696 if (!len)
1697 len = hde_width(hde);
1698
1699 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1700}
1701
1702static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1703 struct perf_hpp *hpp __maybe_unused,
1704 struct perf_evsel *evsel __maybe_unused)
1705{
1706 struct hpp_dynamic_entry *hde;
1707 size_t len = fmt->user_len;
1708
1709 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1710
1711 if (!len)
1712 len = hde_width(hde);
1713
1714 return len;
1715}
1716
Namhyung Kim361459f2015-12-23 02:07:08 +09001717bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1718{
1719 struct hpp_dynamic_entry *hde;
1720
1721 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1722
1723 return hists_to_evsel(hists) == hde->evsel;
1724}
1725
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001726static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1727 struct hist_entry *he)
1728{
1729 struct hpp_dynamic_entry *hde;
1730 size_t len = fmt->user_len;
Namhyung Kim60517d22015-12-23 02:07:03 +09001731 char *str, *pos;
1732 struct format_field *field;
1733 size_t namelen;
1734 bool last = false;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001735 int ret;
1736
1737 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1738
1739 if (!len)
1740 len = hde_width(hde);
1741
Namhyung Kim053a3982015-12-23 02:07:05 +09001742 if (hde->raw_trace)
1743 goto raw_field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001744
Namhyung Kim053a3982015-12-23 02:07:05 +09001745 field = hde->field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001746 namelen = strlen(field->name);
1747 str = he->trace_output;
1748
1749 while (str) {
1750 pos = strchr(str, ' ');
1751 if (pos == NULL) {
1752 last = true;
1753 pos = str + strlen(str);
1754 }
1755
1756 if (!strncmp(str, field->name, namelen)) {
1757 str += namelen + 1;
1758 str = strndup(str, pos - str);
1759
1760 if (str == NULL)
1761 return scnprintf(hpp->buf, hpp->size,
1762 "%*.*s", len, len, "ERROR");
1763 break;
1764 }
1765
1766 if (last)
1767 str = NULL;
1768 else
1769 str = pos + 1;
1770 }
1771
1772 if (str == NULL) {
1773 struct trace_seq seq;
Namhyung Kim053a3982015-12-23 02:07:05 +09001774raw_field:
Namhyung Kim60517d22015-12-23 02:07:03 +09001775 trace_seq_init(&seq);
1776 pevent_print_field(&seq, he->raw_data, hde->field);
1777 str = seq.buffer;
1778 }
1779
1780 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1781 free(str);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001782 return ret;
1783}
1784
1785static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1786 struct hist_entry *a, struct hist_entry *b)
1787{
1788 struct hpp_dynamic_entry *hde;
1789 struct format_field *field;
1790 unsigned offset, size;
1791
1792 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1793
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001794 field = hde->field;
1795 if (field->flags & FIELD_IS_DYNAMIC) {
1796 unsigned long long dyn;
1797
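/*
 * A dynamic (__data_loc) field stores the location of its payload
 * packed into a single number: offset in the low 16 bits and size
 * in the next 16 bits, decoded below.
 */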
1798 pevent_read_number_field(field, a->raw_data, &dyn);
1799 offset = dyn & 0xffff;
1800 size = (dyn >> 16) & 0xffff;
1801
1802 /* record max width for output */
1803 if (size > hde->dynamic_len)
1804 hde->dynamic_len = size;
1805 } else {
1806 offset = field->offset;
1807 size = field->size;
Namhyung Kim60517d22015-12-23 02:07:03 +09001808
1809 update_dynamic_len(hde, a);
1810 update_dynamic_len(hde, b);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001811 }
1812
1813 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1814}
1815
Namhyung Kim361459f2015-12-23 02:07:08 +09001816bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1817{
1818 return fmt->cmp == __sort__hde_cmp;
1819}
1820
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001821static struct hpp_dynamic_entry *
1822__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1823{
1824 struct hpp_dynamic_entry *hde;
1825
1826 hde = malloc(sizeof(*hde));
1827 if (hde == NULL) {
1828 pr_debug("Memory allocation failed\n");
1829 return NULL;
1830 }
1831
1832 hde->evsel = evsel;
1833 hde->field = field;
1834 hde->dynamic_len = 0;
1835
1836 hde->hpp.name = field->name;
1837 hde->hpp.header = __sort__hde_header;
1838 hde->hpp.width = __sort__hde_width;
1839 hde->hpp.entry = __sort__hde_entry;
1840 hde->hpp.color = NULL;
1841
1842 hde->hpp.cmp = __sort__hde_cmp;
1843 hde->hpp.collapse = __sort__hde_cmp;
1844 hde->hpp.sort = __sort__hde_cmp;
1845
1846 INIT_LIST_HEAD(&hde->hpp.list);
1847 INIT_LIST_HEAD(&hde->hpp.sort_list);
1848 hde->hpp.elide = false;
1849 hde->hpp.len = 0;
1850 hde->hpp.user_len = 0;
1851
1852 return hde;
1853}
1854
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001855static int parse_field_name(char *str, char **event, char **field, char **opt)
1856{
1857 char *event_name, *field_name, *opt_name;
1858
1859 event_name = str;
1860 field_name = strchr(str, '.');
1861
1862 if (field_name) {
1863 *field_name++ = '\0';
1864 } else {
1865 event_name = NULL;
1866 field_name = str;
1867 }
1868
1869 opt_name = strchr(field_name, '/');
1870 if (opt_name)
1871 *opt_name++ = '\0';
1872
1873 *event = event_name;
1874 *field = field_name;
1875 *opt = opt_name;
1876
1877 return 0;
1878}
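/*
 * For example (illustrative input), "sched:sched_switch.next_pid/raw" is
 * split into event "sched:sched_switch", field "next_pid" and option "raw";
 * without a '.' the whole token is taken as a field name and the event part
 * is left NULL.
 */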
1879
/* find the matching evsel using a given event name. The event name can be:
Namhyung Kim9735be22016-01-05 19:58:35 +09001881 * 1. '%' + event index (e.g. '%1' for first event)
1882 * 2. full event name (e.g. sched:sched_switch)
1883 * 3. partial event name (should not contain ':')
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001884 */
1885static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1886{
1887 struct perf_evsel *evsel = NULL;
1888 struct perf_evsel *pos;
1889 bool full_name;
1890
1891 /* case 1 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001892 if (event_name[0] == '%') {
1893 int nr = strtol(event_name+1, NULL, 0);
1894
1895 if (nr > evlist->nr_entries)
1896 return NULL;
1897
1898 evsel = perf_evlist__first(evlist);
1899 while (--nr > 0)
1900 evsel = perf_evsel__next(evsel);
1901
1902 return evsel;
1903 }
1904
1905 full_name = !!strchr(event_name, ':');
1906 evlist__for_each(evlist, pos) {
Namhyung Kim9735be22016-01-05 19:58:35 +09001907 /* case 2 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001908 if (full_name && !strcmp(pos->name, event_name))
1909 return pos;
Namhyung Kim9735be22016-01-05 19:58:35 +09001910 /* case 3 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001911 if (!full_name && strstr(pos->name, event_name)) {
1912 if (evsel) {
1913 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1914 event_name, evsel->name, pos->name);
1915 return NULL;
1916 }
1917 evsel = pos;
1918 }
1919 }
1920
1921 return evsel;
1922}
1923
Namhyung Kim3b099bf52015-12-23 02:07:07 +09001924static int __dynamic_dimension__add(struct perf_evsel *evsel,
1925 struct format_field *field,
1926 bool raw_trace)
1927{
1928 struct hpp_dynamic_entry *hde;
1929
1930 hde = __alloc_dynamic_entry(evsel, field);
1931 if (hde == NULL)
1932 return -ENOMEM;
1933
1934 hde->raw_trace = raw_trace;
1935
1936 perf_hpp__register_sort_field(&hde->hpp);
1937 return 0;
1938}
1939
Namhyung Kim2e422fd2015-12-23 02:07:09 +09001940static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1941{
1942 int ret;
1943 struct format_field *field;
1944
1945 field = evsel->tp_format->format.fields;
1946 while (field) {
1947 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1948 if (ret < 0)
1949 return ret;
1950
1951 field = field->next;
1952 }
1953 return 0;
1954}
1955
1956static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1957{
1958 int ret;
1959 struct perf_evsel *evsel;
1960
1961 evlist__for_each(evlist, evsel) {
1962 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1963 continue;
1964
1965 ret = add_evsel_fields(evsel, raw_trace);
1966 if (ret < 0)
1967 return ret;
1968 }
1969 return 0;
1970}
1971
Namhyung Kim9735be22016-01-05 19:58:35 +09001972static int add_all_matching_fields(struct perf_evlist *evlist,
1973 char *field_name, bool raw_trace)
1974{
1975 int ret = -ESRCH;
1976 struct perf_evsel *evsel;
1977 struct format_field *field;
1978
1979 evlist__for_each(evlist, evsel) {
1980 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1981 continue;
1982
1983 field = pevent_find_any_field(evsel->tp_format, field_name);
1984 if (field == NULL)
1985 continue;
1986
1987 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1988 if (ret < 0)
1989 break;
1990 }
1991 return ret;
1992}
1993
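/*
 * Handle a --sort token naming tracepoint data, e.g. (illustrative):
 *   trace_fields            all fields of all tracepoint events
 *   <field>                 field looked up in every tracepoint event
 *   <event>.<field>         one field of one event
 *   <event>.*               all fields of one event
 *   <event>.<field>/raw     print the raw value, no pretty-printing
 * where <event> may also be given as '%<index>'.
 */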
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001994static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1995{
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001996 char *str, *event_name, *field_name, *opt_name;
1997 struct perf_evsel *evsel;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001998 struct format_field *field;
Namhyung Kim053a3982015-12-23 02:07:05 +09001999 bool raw_trace = symbol_conf.raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002000 int ret = 0;
2001
2002 if (evlist == NULL)
2003 return -ENOENT;
2004
2005 str = strdup(tok);
2006 if (str == NULL)
2007 return -ENOMEM;
2008
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002009 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002010 ret = -EINVAL;
2011 goto out;
2012 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002013
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002014 if (opt_name) {
2015 if (strcmp(opt_name, "raw")) {
2016 pr_debug("unsupported field option %s\n", opt_name);
Namhyung Kim053a3982015-12-23 02:07:05 +09002017 ret = -EINVAL;
2018 goto out;
2019 }
2020 raw_trace = true;
2021 }
2022
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002023 if (!strcmp(field_name, "trace_fields")) {
2024 ret = add_all_dynamic_fields(evlist, raw_trace);
2025 goto out;
2026 }
2027
Namhyung Kim9735be22016-01-05 19:58:35 +09002028 if (event_name == NULL) {
2029 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2030 goto out;
2031 }
2032
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002033 evsel = find_evsel(evlist, event_name);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002034 if (evsel == NULL) {
2035 pr_debug("Cannot find event: %s\n", event_name);
2036 ret = -ENOENT;
2037 goto out;
2038 }
2039
2040 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2041 pr_debug("%s is not a tracepoint event\n", event_name);
2042 ret = -EINVAL;
2043 goto out;
2044 }
2045
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002046 if (!strcmp(field_name, "*")) {
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002047 ret = add_evsel_fields(evsel, raw_trace);
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002048 } else {
2049 field = pevent_find_any_field(evsel->tp_format, field_name);
2050 if (field == NULL) {
2051 pr_debug("Cannot find event field for %s.%s\n",
2052 event_name, field_name);
ret = -ENOENT;
goto out;
2054 }
2055
2056 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2057 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002058
2059out:
2060 free(str);
2061 return ret;
2062}
2063
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002064static int __sort_dimension__add(struct sort_dimension *sd)
Namhyung Kim2f532d092013-04-03 21:26:10 +09002065{
2066 if (sd->taken)
Namhyung Kim8b536992014-03-03 11:46:55 +09002067 return 0;
2068
Namhyung Kima7d945b2014-03-04 10:46:34 +09002069 if (__sort_dimension__add_hpp_sort(sd) < 0)
Namhyung Kim8b536992014-03-03 11:46:55 +09002070 return -1;
Namhyung Kim2f532d092013-04-03 21:26:10 +09002071
2072 if (sd->entry->se_collapse)
2073 sort__need_collapse = 1;
2074
Namhyung Kim2f532d092013-04-03 21:26:10 +09002075 sd->taken = 1;
Namhyung Kim8b536992014-03-03 11:46:55 +09002076
2077 return 0;
Namhyung Kim2f532d092013-04-03 21:26:10 +09002078}
2079
Namhyung Kima2ce0672014-03-04 09:06:42 +09002080static int __hpp_dimension__add(struct hpp_dimension *hd)
2081{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002082 struct perf_hpp_fmt *fmt;
Namhyung Kima2ce0672014-03-04 09:06:42 +09002083
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002084 if (hd->taken)
2085 return 0;
2086
2087 fmt = __hpp_dimension__alloc_hpp(hd);
2088 if (!fmt)
2089 return -1;
2090
2091 hd->taken = 1;
2092 perf_hpp__register_sort_field(fmt);
Namhyung Kima2ce0672014-03-04 09:06:42 +09002093 return 0;
2094}
2095
Namhyung Kima7d945b2014-03-04 10:46:34 +09002096static int __sort_dimension__add_output(struct sort_dimension *sd)
2097{
2098 if (sd->taken)
2099 return 0;
2100
2101 if (__sort_dimension__add_hpp_output(sd) < 0)
2102 return -1;
2103
2104 sd->taken = 1;
2105 return 0;
2106}
2107
2108static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2109{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002110 struct perf_hpp_fmt *fmt;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002111
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002112 if (hd->taken)
2113 return 0;
2114
2115 fmt = __hpp_dimension__alloc_hpp(hd);
2116 if (!fmt)
2117 return -1;
2118
2119 hd->taken = 1;
2120 perf_hpp__column_register(fmt);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002121 return 0;
2122}
2123
Jiri Olsabeeaaeb2015-10-06 14:25:11 +02002124int hpp_dimension__add_output(unsigned col)
2125{
2126 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2127 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2128}
2129
Namhyung Kim40184c42015-12-23 02:07:01 +09002130static int sort_dimension__add(const char *tok,
2131 struct perf_evlist *evlist __maybe_unused)
John Kacurdd68ada2009-09-24 18:02:49 +02002132{
2133 unsigned int i;
2134
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002135 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2136 struct sort_dimension *sd = &common_sort_dimensions[i];
John Kacurdd68ada2009-09-24 18:02:49 +02002137
John Kacurdd68ada2009-09-24 18:02:49 +02002138 if (strncasecmp(tok, sd->name, strlen(tok)))
2139 continue;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002140
John Kacurdd68ada2009-09-24 18:02:49 +02002141 if (sd->entry == &sort_parent) {
2142 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2143 if (ret) {
2144 char err[BUFSIZ];
2145
2146 regerror(ret, &parent_regex, err, sizeof(err));
Arnaldo Carvalho de Melo2aefa4f2010-04-02 12:30:57 -03002147 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2148 return -EINVAL;
John Kacurdd68ada2009-09-24 18:02:49 +02002149 }
2150 sort__has_parent = 1;
Namhyung Kim930477b2013-04-05 10:26:36 +09002151 } else if (sd->entry == &sort_sym) {
Namhyung Kim1af556402012-09-14 17:35:27 +09002152 sort__has_sym = 1;
Kan Liang94ba4622015-02-09 05:39:44 +00002153 /*
 * perf diff displays the performance difference among
 * two or more perf.data files. Those files could come
 * from different binaries, so we should not compare
 * their ips but the symbol names.
2158 */
2159 if (sort__mode == SORT_MODE__DIFF)
2160 sd->entry->se_collapse = sort__sym_sort;
2161
Namhyung Kim68f6d022013-12-18 14:21:10 +09002162 } else if (sd->entry == &sort_dso) {
2163 sort__has_dso = 1;
Kan Liang2e7ea3a2015-09-04 10:45:43 -04002164 } else if (sd->entry == &sort_socket) {
2165 sort__has_socket = 1;
Namhyung Kimcfd92da2016-01-21 19:13:24 -03002166 } else if (sd->entry == &sort_thread) {
2167 sort__has_thread = 1;
John Kacurdd68ada2009-09-24 18:02:49 +02002168 }
2169
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002170 return __sort_dimension__add(sd);
John Kacurdd68ada2009-09-24 18:02:49 +02002171 }
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002172
Namhyung Kima2ce0672014-03-04 09:06:42 +09002173 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2174 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2175
2176 if (strncasecmp(tok, hd->name, strlen(tok)))
2177 continue;
2178
2179 return __hpp_dimension__add(hd);
2180 }
2181
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002182 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2183 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2184
2185 if (strncasecmp(tok, sd->name, strlen(tok)))
2186 continue;
2187
Namhyung Kim55369fc2013-04-01 20:35:20 +09002188 if (sort__mode != SORT_MODE__BRANCH)
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002189 return -EINVAL;
2190
2191 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2192 sort__has_sym = 1;
2193
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002194 __sort_dimension__add(sd);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002195 return 0;
2196 }
2197
Namhyung Kimafab87b2013-04-03 21:26:11 +09002198 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2199 struct sort_dimension *sd = &memory_sort_dimensions[i];
2200
2201 if (strncasecmp(tok, sd->name, strlen(tok)))
2202 continue;
2203
2204 if (sort__mode != SORT_MODE__MEMORY)
2205 return -EINVAL;
2206
2207 if (sd->entry == &sort_mem_daddr_sym)
2208 sort__has_sym = 1;
2209
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002210 __sort_dimension__add(sd);
Namhyung Kimafab87b2013-04-03 21:26:11 +09002211 return 0;
2212 }
2213
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002214 if (!add_dynamic_entry(evlist, tok))
2215 return 0;
2216
John Kacurdd68ada2009-09-24 18:02:49 +02002217 return -ESRCH;
2218}
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002219
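/*
 * Pick the default sort key string for the current sort mode.  When every
 * event in the session is a tracepoint, switch to SORT_MODE__TRACEPOINT so
 * the tracepoint default order (or "trace_fields" when
 * symbol_conf.raw_trace is set) is used.
 */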
Namhyung Kimd49dade2015-12-23 02:07:10 +09002220static const char *get_default_sort_order(struct perf_evlist *evlist)
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002221{
2222 const char *default_sort_orders[] = {
2223 default_sort_order,
2224 default_branch_sort_order,
2225 default_mem_sort_order,
2226 default_top_sort_order,
2227 default_diff_sort_order,
Namhyung Kimd49dade2015-12-23 02:07:10 +09002228 default_tracepoint_sort_order,
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002229 };
Namhyung Kimd49dade2015-12-23 02:07:10 +09002230 bool use_trace = true;
2231 struct perf_evsel *evsel;
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002232
2233 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2234
Namhyung Kimd49dade2015-12-23 02:07:10 +09002235 if (evlist == NULL)
2236 goto out_no_evlist;
2237
2238 evlist__for_each(evlist, evsel) {
2239 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2240 use_trace = false;
2241 break;
2242 }
2243 }
2244
2245 if (use_trace) {
2246 sort__mode = SORT_MODE__TRACEPOINT;
2247 if (symbol_conf.raw_trace)
2248 return "trace_fields";
2249 }
2250out_no_evlist:
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002251 return default_sort_orders[sort__mode];
2252}
2253
Namhyung Kimd49dade2015-12-23 02:07:10 +09002254static int setup_sort_order(struct perf_evlist *evlist)
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002255{
2256 char *new_sort_order;
2257
2258 /*
2259 * Append '+'-prefixed sort order to the default sort
2260 * order string.
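 *
 * For example (illustrative), "--sort +period" results in the default
 * keys for the current mode with ",period" appended.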
2261 */
2262 if (!sort_order || is_strict_order(sort_order))
2263 return 0;
2264
2265 if (sort_order[1] == '\0') {
2266 error("Invalid --sort key: `+'");
2267 return -EINVAL;
2268 }
2269
2270 /*
 * We allocate a new sort_order string, but we never free it,
 * because it is referenced throughout the rest of the code.
2273 */
2274 if (asprintf(&new_sort_order, "%s,%s",
Namhyung Kimd49dade2015-12-23 02:07:10 +09002275 get_default_sort_order(evlist), sort_order + 1) < 0) {
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002276 error("Not enough memory to set up --sort");
2277 return -ENOMEM;
2278 }
2279
2280 sort_order = new_sort_order;
2281 return 0;
2282}
2283
Jiri Olsab97511c2016-01-07 10:14:08 +01002284/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
2287 */
2288static char *prefix_if_not_in(const char *pre, char *str)
2289{
2290 char *n;
2291
2292 if (!str || strstr(str, pre))
2293 return str;
2294
2295 if (asprintf(&n, "%s,%s", pre, str) < 0)
2296 return NULL;
2297
2298 free(str);
2299 return n;
2300}
2301
2302static char *setup_overhead(char *keys)
2303{
2304 keys = prefix_if_not_in("overhead", keys);
2305
2306 if (symbol_conf.cumulate_callchain)
2307 keys = prefix_if_not_in("overhead_children", keys);
2308
2309 return keys;
2310}
2311
Namhyung Kim40184c42015-12-23 02:07:01 +09002312static int __setup_sorting(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002313{
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002314 char *tmp, *tok, *str;
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002315 const char *sort_keys;
Namhyung Kim55309982013-02-06 14:57:16 +09002316 int ret = 0;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002317
Namhyung Kimd49dade2015-12-23 02:07:10 +09002318 ret = setup_sort_order(evlist);
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002319 if (ret)
2320 return ret;
2321
2322 sort_keys = sort_order;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002323 if (sort_keys == NULL) {
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002324 if (is_strict_order(field_order)) {
Namhyung Kima7d945b2014-03-04 10:46:34 +09002325 /*
 * If the user specified a field order but no sort order,
 * honor it and do not add the default sort keys.
2328 */
2329 return 0;
2330 }
2331
Namhyung Kimd49dade2015-12-23 02:07:10 +09002332 sort_keys = get_default_sort_order(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002333 }
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002334
2335 str = strdup(sort_keys);
Namhyung Kim5936f542013-02-06 14:57:17 +09002336 if (str == NULL) {
2337 error("Not enough memory to setup sort keys");
2338 return -ENOMEM;
2339 }
2340
Jiri Olsab97511c2016-01-07 10:14:08 +01002341 /*
2342 * Prepend overhead fields for backward compatibility.
2343 */
2344 if (!is_strict_order(field_order)) {
2345 str = setup_overhead(str);
2346 if (str == NULL) {
2347 error("Not enough memory to setup overhead keys");
2348 return -ENOMEM;
2349 }
2350 }
2351
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002352 for (tok = strtok_r(str, ", ", &tmp);
2353 tok; tok = strtok_r(NULL, ", ", &tmp)) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002354 ret = sort_dimension__add(tok, evlist);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002355 if (ret == -EINVAL) {
2356 error("Invalid --sort key: `%s'", tok);
Namhyung Kim55309982013-02-06 14:57:16 +09002357 break;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002358 } else if (ret == -ESRCH) {
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002359 error("Unknown --sort key: `%s'", tok);
Namhyung Kim55309982013-02-06 14:57:16 +09002360 break;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002361 }
2362 }
2363
2364 free(str);
Namhyung Kim55309982013-02-06 14:57:16 +09002365 return ret;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002366}
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002367
Jiri Olsaf2998422014-05-23 17:15:47 +02002368void perf_hpp__set_elide(int idx, bool elide)
Namhyung Kime67d49a2014-03-18 13:00:59 +09002369{
Jiri Olsaf2998422014-05-23 17:15:47 +02002370 struct perf_hpp_fmt *fmt;
2371 struct hpp_sort_entry *hse;
Namhyung Kime67d49a2014-03-18 13:00:59 +09002372
Jiri Olsaf2998422014-05-23 17:15:47 +02002373 perf_hpp__for_each_format(fmt) {
2374 if (!perf_hpp__is_sort_entry(fmt))
2375 continue;
2376
2377 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2378 if (hse->se->se_width_idx == idx) {
2379 fmt->elide = elide;
2380 break;
2381 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002382 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002383}
2384
Jiri Olsaf2998422014-05-23 17:15:47 +02002385static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002386{
2387 if (list && strlist__nr_entries(list) == 1) {
2388 if (fp != NULL)
2389 fprintf(fp, "# %s: %s\n", list_name,
2390 strlist__entry(list, 0)->s);
Jiri Olsaf2998422014-05-23 17:15:47 +02002391 return true;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002392 }
Jiri Olsaf2998422014-05-23 17:15:47 +02002393 return false;
2394}
2395
2396static bool get_elide(int idx, FILE *output)
2397{
2398 switch (idx) {
2399 case HISTC_SYMBOL:
2400 return __get_elide(symbol_conf.sym_list, "symbol", output);
2401 case HISTC_DSO:
2402 return __get_elide(symbol_conf.dso_list, "dso", output);
2403 case HISTC_COMM:
2404 return __get_elide(symbol_conf.comm_list, "comm", output);
2405 default:
2406 break;
2407 }
2408
2409 if (sort__mode != SORT_MODE__BRANCH)
2410 return false;
2411
2412 switch (idx) {
2413 case HISTC_SYMBOL_FROM:
2414 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2415 case HISTC_SYMBOL_TO:
2416 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2417 case HISTC_DSO_FROM:
2418 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2419 case HISTC_DSO_TO:
2420 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2421 default:
2422 break;
2423 }
2424
2425 return false;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002426}
Namhyung Kim08e71542013-04-03 21:26:19 +09002427
2428void sort__setup_elide(FILE *output)
2429{
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002430 struct perf_hpp_fmt *fmt;
2431 struct hpp_sort_entry *hse;
Namhyung Kim7524f632013-11-08 17:53:42 +09002432
Jiri Olsaf2998422014-05-23 17:15:47 +02002433 perf_hpp__for_each_format(fmt) {
2434 if (!perf_hpp__is_sort_entry(fmt))
2435 continue;
Namhyung Kim08e71542013-04-03 21:26:19 +09002436
Jiri Olsaf2998422014-05-23 17:15:47 +02002437 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2438 fmt->elide = get_elide(hse->se->se_width_idx, output);
Namhyung Kim08e71542013-04-03 21:26:19 +09002439 }
2440
Namhyung Kim7524f632013-11-08 17:53:42 +09002441 /*
 * It makes no sense to elide all of the sort entries.
 * Just revert them so they show up again.
2444 */
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002445 perf_hpp__for_each_format(fmt) {
2446 if (!perf_hpp__is_sort_entry(fmt))
2447 continue;
2448
Jiri Olsaf2998422014-05-23 17:15:47 +02002449 if (!fmt->elide)
Namhyung Kim7524f632013-11-08 17:53:42 +09002450 return;
2451 }
2452
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002453 perf_hpp__for_each_format(fmt) {
2454 if (!perf_hpp__is_sort_entry(fmt))
2455 continue;
2456
Jiri Olsaf2998422014-05-23 17:15:47 +02002457 fmt->elide = false;
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002458 }
Namhyung Kim08e71542013-04-03 21:26:19 +09002459}
Namhyung Kima7d945b2014-03-04 10:46:34 +09002460
2461static int output_field_add(char *tok)
2462{
2463 unsigned int i;
2464
2465 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2466 struct sort_dimension *sd = &common_sort_dimensions[i];
2467
2468 if (strncasecmp(tok, sd->name, strlen(tok)))
2469 continue;
2470
2471 return __sort_dimension__add_output(sd);
2472 }
2473
2474 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2475 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2476
2477 if (strncasecmp(tok, hd->name, strlen(tok)))
2478 continue;
2479
2480 return __hpp_dimension__add_output(hd);
2481 }
2482
2483 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2484 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2485
2486 if (strncasecmp(tok, sd->name, strlen(tok)))
2487 continue;
2488
2489 return __sort_dimension__add_output(sd);
2490 }
2491
2492 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2493 struct sort_dimension *sd = &memory_sort_dimensions[i];
2494
2495 if (strncasecmp(tok, sd->name, strlen(tok)))
2496 continue;
2497
2498 return __sort_dimension__add_output(sd);
2499 }
2500
2501 return -ESRCH;
2502}
2503
2504static void reset_dimensions(void)
2505{
2506 unsigned int i;
2507
2508 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2509 common_sort_dimensions[i].taken = 0;
2510
2511 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2512 hpp_sort_dimensions[i].taken = 0;
2513
2514 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2515 bstack_sort_dimensions[i].taken = 0;
2516
2517 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2518 memory_sort_dimensions[i].taken = 0;
2519}
2520
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002521bool is_strict_order(const char *order)
2522{
2523 return order && (*order != '+');
2524}
2525
Namhyung Kima7d945b2014-03-04 10:46:34 +09002526static int __setup_output_field(void)
2527{
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002528 char *tmp, *tok, *str, *strp;
2529 int ret = -EINVAL;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002530
2531 if (field_order == NULL)
2532 return 0;
2533
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002534 strp = str = strdup(field_order);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002535 if (str == NULL) {
2536 error("Not enough memory to setup output fields");
2537 return -ENOMEM;
2538 }
2539
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002540 if (!is_strict_order(field_order))
2541 strp++;
2542
2543 if (!strlen(strp)) {
2544 error("Invalid --fields key: `+'");
2545 goto out;
2546 }
2547
2548 for (tok = strtok_r(strp, ", ", &tmp);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002549 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2550 ret = output_field_add(tok);
2551 if (ret == -EINVAL) {
2552 error("Invalid --fields key: `%s'", tok);
2553 break;
2554 } else if (ret == -ESRCH) {
2555 error("Unknown --fields key: `%s'", tok);
2556 break;
2557 }
2558 }
2559
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002560out:
Namhyung Kima7d945b2014-03-04 10:46:34 +09002561 free(str);
2562 return ret;
2563}
2564
Namhyung Kim40184c42015-12-23 02:07:01 +09002565int setup_sorting(struct perf_evlist *evlist)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002566{
2567 int err;
2568
Namhyung Kim40184c42015-12-23 02:07:01 +09002569 err = __setup_sorting(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002570 if (err < 0)
2571 return err;
2572
2573 if (parent_pattern != default_parent_pattern) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002574 err = sort_dimension__add("parent", evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002575 if (err < 0)
2576 return err;
2577 }
2578
2579 reset_dimensions();
2580
2581 /*
2582 * perf diff doesn't use default hpp output fields.
2583 */
2584 if (sort__mode != SORT_MODE__DIFF)
2585 perf_hpp__init();
2586
2587 err = __setup_output_field();
2588 if (err < 0)
2589 return err;
2590
2591 /* copy sort keys to output fields */
2592 perf_hpp__setup_output_field();
2593 /* and then copy output fields to sort keys */
2594 perf_hpp__append_sort_keys();
2595
2596 return 0;
2597}
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002598
2599void reset_output_field(void)
2600{
2601 sort__need_collapse = 0;
2602 sort__has_parent = 0;
2603 sort__has_sym = 0;
2604 sort__has_dso = 0;
2605
Namhyung Kimd69b2962014-05-23 10:59:01 +09002606 field_order = NULL;
2607 sort_order = NULL;
2608
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002609 reset_dimensions();
2610 perf_hpp__reset_output_field();
2611}