#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
int		sort__has_thread = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

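/*
 * snprintf() variant for --field-separator output: any occurrence of the
 * separator character inside the formatted string is replaced with '.' so
 * the column separator stays unambiguous.
 */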
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

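/*
 * Common helper for the comparison functions below: order entries whose key
 * is NULL consistently against entries that have one.
 */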
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

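/*
 * Format a symbol column entry: in verbose mode the raw address and the
 * symtab origin character are prepended, the cpumode level character is
 * printed in brackets, and variable (data) maps show symbol+offset instead
 * of just the symbol name; unresolved entries fall back to the hex address.
 */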
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline) {
		if (!left->ms.map)
			left->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = left->ms.map;
			left->srcline = get_srcline(map->dso,
					   map__rip_2objdump(map, left->ip),
					   left->ms.sym, true);
		}
	}
	if (!right->srcline) {
		if (!right->ms.map)
			right->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = right->ms.map;
			right->srcline = get_srcline(map->dso,
					   map__rip_2objdump(map, right->ip),
					   right->ms.sym, true);
		}
	}
	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcfile */

static char no_srcfile[1];

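/*
 * Resolve the source file of an entry via the srcline machinery and strip
 * the trailing ":line" part; unresolved entries share the static no_srcfile
 * marker so they all compare equal.
 */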
331static char *get_srcfile(struct hist_entry *e)
332{
333 char *sf, *p;
334 struct map *map = e->ms.map;
335
Andi Kleen2f84b422015-09-01 11:47:19 -0700336 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
337 e->ms.sym, false, true);
Andi Kleen76b10652015-08-11 06:36:55 -0700338 if (!strcmp(sf, SRCLINE_UNKNOWN))
339 return no_srcfile;
Andi Kleen31191a82015-08-07 15:54:24 -0700340 p = strchr(sf, ':');
341 if (p && *sf) {
342 *p = 0;
343 return sf;
344 }
345 free(sf);
346 return no_srcfile;
347}
348
349static int64_t
350sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
351{
352 if (!left->srcfile) {
353 if (!left->ms.map)
354 left->srcfile = no_srcfile;
355 else
356 left->srcfile = get_srcfile(left);
357 }
358 if (!right->srcfile) {
359 if (!right->ms.map)
360 right->srcfile = no_srcfile;
361 else
362 right->srcfile = get_srcfile(right);
363 }
364 return strcmp(right->srcfile, left->srcfile);
365}
366
367static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
368 size_t size, unsigned int width)
369{
370 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
371}
372
373struct sort_entry sort_srcfile = {
374 .se_header = "Source File",
375 .se_cmp = sort__srcfile_cmp,
376 .se_snprintf = hist_entry__srcfile_snprintf,
377 .se_width_idx = HISTC_SRCFILE,
378};
379
John Kacurdd68ada2009-09-24 18:02:49 +0200380/* --sort parent */
381
Frederic Weisbecker872a8782011-06-29 03:14:52 +0200382static int64_t
John Kacurdd68ada2009-09-24 18:02:49 +0200383sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
384{
385 struct symbol *sym_l = left->parent;
386 struct symbol *sym_r = right->parent;
387
388 if (!sym_l || !sym_r)
389 return cmp_null(sym_l, sym_r);
390
Namhyung Kim202e7a62014-03-04 11:01:41 +0900391 return strcmp(sym_r->name, sym_l->name);
John Kacurdd68ada2009-09-24 18:02:49 +0200392}
393
Arnaldo Carvalho de Meloc824c432013-10-22 19:01:31 -0300394static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
Arnaldo Carvalho de Meloa4e3b952010-03-31 11:33:40 -0300395 size_t size, unsigned int width)
John Kacurdd68ada2009-09-24 18:02:49 +0200396{
Namhyung Kim5b591662014-07-31 14:47:38 +0900397 return repsep_snprintf(bf, size, "%-*.*s", width, width,
Arnaldo Carvalho de Meloc824c432013-10-22 19:01:31 -0300398 he->parent ? he->parent->name : "[other]");
John Kacurdd68ada2009-09-24 18:02:49 +0200399}
400
Frederic Weisbecker872a8782011-06-29 03:14:52 +0200401struct sort_entry sort_parent = {
402 .se_header = "Parent symbol",
403 .se_cmp = sort__parent_cmp,
404 .se_snprintf = hist_entry__parent_snprintf,
405 .se_width_idx = HISTC_PARENT,
406};
407
Arun Sharmaf60f3592010-06-04 11:27:10 -0300408/* --sort cpu */
409
Frederic Weisbecker872a8782011-06-29 03:14:52 +0200410static int64_t
Arun Sharmaf60f3592010-06-04 11:27:10 -0300411sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
412{
413 return right->cpu - left->cpu;
414}
415
Arnaldo Carvalho de Meloc824c432013-10-22 19:01:31 -0300416static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
417 size_t size, unsigned int width)
Arun Sharmaf60f3592010-06-04 11:27:10 -0300418{
Namhyung Kim5b591662014-07-31 14:47:38 +0900419 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
Arun Sharmaf60f3592010-06-04 11:27:10 -0300420}
421
Frederic Weisbecker872a8782011-06-29 03:14:52 +0200422struct sort_entry sort_cpu = {
423 .se_header = "CPU",
424 .se_cmp = sort__cpu_cmp,
425 .se_snprintf = hist_entry__cpu_snprintf,
426 .se_width_idx = HISTC_CPU,
427};
428
Kan Liang2e7ea3a2015-09-04 10:45:43 -0400429/* --sort socket */
430
431static int64_t
432sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
433{
434 return right->socket - left->socket;
435}
436
437static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
438 size_t size, unsigned int width)
439{
440 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
441}
442
443struct sort_entry sort_socket = {
444 .se_header = "Socket",
445 .se_cmp = sort__socket_cmp,
446 .se_snprintf = hist_entry__socket_snprintf,
447 .se_width_idx = HISTC_SOCKET,
448};
449
Namhyung Kima34bb6a02015-12-23 02:07:04 +0900450/* --sort trace */
451
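/*
 * Render the tracepoint payload of an entry, either as raw field dumps
 * (--raw-trace) or through the event's print format via libtraceevent.
 */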
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

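/* --sort cycles: basic block cycle counts carried in the branch flags */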
static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

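/*
 * The dcacheline sort key groups samples that touched the same data cache
 * line: entries are ordered by cpumode, then by the identity of the backing
 * map (maj/min/ino/generation, plus pid for anonymous userspace mappings)
 * and finally by the cache-line-aligned resolved address.
 */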
static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

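/*
 * Weight columns: "local_weight" shows the average sample weight of an
 * entry (total weight / number of events), "weight" shows the summed total.
 */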
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

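/*
 * Width of the Transaction column: sum of the flag names not marked
 * skip_for_len, plus 4 characters for the ":XX " abort code suffix.
 */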
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

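/*
 * Tables mapping --sort key names to their sort_entry implementations,
 * split by mode: common keys, branch-stack keys and memory keys.
 */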
static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

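/*
 * Output dimensions backed directly by perf_hpp__format (overhead, period,
 * ...) rather than by a sort_entry.
 */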
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

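/*
 * Adapter exposing a classic sort_entry through the perf_hpp_fmt interface
 * used by the hists output/sort format lists.
 */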
Namhyung Kim8b536992014-03-03 11:46:55 +09001439struct hpp_sort_entry {
1440 struct perf_hpp_fmt hpp;
1441 struct sort_entry *se;
1442};
1443
Namhyung Kime0d66c72014-07-31 14:47:37 +09001444void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
Namhyung Kim678a5002014-03-20 11:18:54 +09001445{
1446 struct hpp_sort_entry *hse;
1447
1448 if (!perf_hpp__is_sort_entry(fmt))
1449 return;
1450
1451 hse = container_of(fmt, struct hpp_sort_entry, hpp);
Namhyung Kim1ecd4452014-07-31 14:47:40 +09001452 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
Namhyung Kim678a5002014-03-20 11:18:54 +09001453}
1454
Namhyung Kim8b536992014-03-03 11:46:55 +09001455static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1456 struct perf_evsel *evsel)
1457{
1458 struct hpp_sort_entry *hse;
Namhyung Kim5b591662014-07-31 14:47:38 +09001459 size_t len = fmt->user_len;
Namhyung Kim8b536992014-03-03 11:46:55 +09001460
1461 hse = container_of(fmt, struct hpp_sort_entry, hpp);
Namhyung Kim8b536992014-03-03 11:46:55 +09001462
Namhyung Kim5b591662014-07-31 14:47:38 +09001463 if (!len)
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -03001464 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
Namhyung Kim5b591662014-07-31 14:47:38 +09001465
Namhyung Kim1ecd4452014-07-31 14:47:40 +09001466 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
Namhyung Kim8b536992014-03-03 11:46:55 +09001467}
1468
1469static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
1470 struct perf_hpp *hpp __maybe_unused,
1471 struct perf_evsel *evsel)
1472{
1473 struct hpp_sort_entry *hse;
Namhyung Kim5b591662014-07-31 14:47:38 +09001474 size_t len = fmt->user_len;
Namhyung Kim8b536992014-03-03 11:46:55 +09001475
1476 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1477
Namhyung Kim5b591662014-07-31 14:47:38 +09001478 if (!len)
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -03001479 len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
Namhyung Kim5b591662014-07-31 14:47:38 +09001480
1481 return len;
Namhyung Kim8b536992014-03-03 11:46:55 +09001482}
1483
1484static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1485 struct hist_entry *he)
1486{
1487 struct hpp_sort_entry *hse;
Namhyung Kim5b591662014-07-31 14:47:38 +09001488 size_t len = fmt->user_len;
Namhyung Kim8b536992014-03-03 11:46:55 +09001489
1490 hse = container_of(fmt, struct hpp_sort_entry, hpp);
Namhyung Kim5b591662014-07-31 14:47:38 +09001491
1492 if (!len)
1493 len = hists__col_len(he->hists, hse->se->se_width_idx);
Namhyung Kim8b536992014-03-03 11:46:55 +09001494
1495 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
1496}
1497
Namhyung Kim87bbdf72015-01-08 09:45:46 +09001498static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
1499 struct hist_entry *a, struct hist_entry *b)
1500{
1501 struct hpp_sort_entry *hse;
1502
1503 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1504 return hse->se->se_cmp(a, b);
1505}
1506
1507static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
1508 struct hist_entry *a, struct hist_entry *b)
1509{
1510 struct hpp_sort_entry *hse;
1511 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
1512
1513 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1514 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
1515 return collapse_fn(a, b);
1516}
1517
1518static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
1519 struct hist_entry *a, struct hist_entry *b)
1520{
1521 struct hpp_sort_entry *hse;
1522 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
1523
1524 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1525 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
1526 return sort_fn(a, b);
1527}
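/*
 * se_collapse and se_sort are optional in struct sort_entry: the
 * "x ?: y" fallbacks above drop back to se_cmp whenever an entry does
 * not provide a dedicated collapse or resort comparator.
 */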
1528
Jiri Olsa97358082016-01-18 10:24:03 +01001529bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1530{
1531 return format->header == __sort__hpp_header;
1532}
1533
1534static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
1535{
1536 struct hpp_sort_entry *hse_a;
1537 struct hpp_sort_entry *hse_b;
1538
1539 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
1540 return false;
1541
1542 hse_a = container_of(a, struct hpp_sort_entry, hpp);
1543 hse_b = container_of(b, struct hpp_sort_entry, hpp);
1544
1545 return hse_a->se == hse_b->se;
1546}
1547
Jiri Olsa564132f2016-01-18 10:24:09 +01001548static void hse_free(struct perf_hpp_fmt *fmt)
1549{
1550 struct hpp_sort_entry *hse;
1551
1552 hse = container_of(fmt, struct hpp_sort_entry, hpp);
1553 free(hse);
1554}
1555
Namhyung Kima7d945b2014-03-04 10:46:34 +09001556static struct hpp_sort_entry *
1557__sort_dimension__alloc_hpp(struct sort_dimension *sd)
Namhyung Kim8b536992014-03-03 11:46:55 +09001558{
1559 struct hpp_sort_entry *hse;
1560
1561 hse = malloc(sizeof(*hse));
1562 if (hse == NULL) {
1563 pr_err("Memory allocation failed\n");
Namhyung Kima7d945b2014-03-04 10:46:34 +09001564 return NULL;
Namhyung Kim8b536992014-03-03 11:46:55 +09001565 }
1566
1567 hse->se = sd->entry;
Namhyung Kim1ecd4452014-07-31 14:47:40 +09001568 hse->hpp.name = sd->entry->se_header;
Namhyung Kim8b536992014-03-03 11:46:55 +09001569 hse->hpp.header = __sort__hpp_header;
1570 hse->hpp.width = __sort__hpp_width;
1571 hse->hpp.entry = __sort__hpp_entry;
1572 hse->hpp.color = NULL;
1573
Namhyung Kim87bbdf72015-01-08 09:45:46 +09001574 hse->hpp.cmp = __sort__hpp_cmp;
1575 hse->hpp.collapse = __sort__hpp_collapse;
1576 hse->hpp.sort = __sort__hpp_sort;
Jiri Olsa97358082016-01-18 10:24:03 +01001577 hse->hpp.equal = __sort__hpp_equal;
Jiri Olsa564132f2016-01-18 10:24:09 +01001578 hse->hpp.free = hse_free;
Namhyung Kim8b536992014-03-03 11:46:55 +09001579
1580 INIT_LIST_HEAD(&hse->hpp.list);
1581 INIT_LIST_HEAD(&hse->hpp.sort_list);
Jiri Olsaf2998422014-05-23 17:15:47 +02001582 hse->hpp.elide = false;
Namhyung Kime0d66c72014-07-31 14:47:37 +09001583 hse->hpp.len = 0;
Namhyung Kim5b591662014-07-31 14:47:38 +09001584 hse->hpp.user_len = 0;
Namhyung Kim8b536992014-03-03 11:46:55 +09001585
Namhyung Kima7d945b2014-03-04 10:46:34 +09001586 return hse;
1587}
1588
Jiri Olsa564132f2016-01-18 10:24:09 +01001589static void hpp_free(struct perf_hpp_fmt *fmt)
1590{
1591 free(fmt);
1592}
1593
Jiri Olsa1945c3e2016-01-18 10:24:07 +01001594static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1595{
1596 struct perf_hpp_fmt *fmt;
1597
1598 fmt = memdup(hd->fmt, sizeof(*fmt));
1599 if (fmt) {
1600 INIT_LIST_HEAD(&fmt->list);
1601 INIT_LIST_HEAD(&fmt->sort_list);
Jiri Olsa564132f2016-01-18 10:24:09 +01001602 fmt->free = hpp_free;
Jiri Olsa1945c3e2016-01-18 10:24:07 +01001603 }
1604
1605 return fmt;
1606}
1607
Namhyung Kima7d945b2014-03-04 10:46:34 +09001608static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1609{
1610 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1611
1612 if (hse == NULL)
1613 return -1;
1614
Namhyung Kim8b536992014-03-03 11:46:55 +09001615 perf_hpp__register_sort_field(&hse->hpp);
1616 return 0;
1617}
1618
Jiri Olsa07600022016-01-18 10:24:16 +01001619static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
1620 struct sort_dimension *sd)
Namhyung Kima7d945b2014-03-04 10:46:34 +09001621{
1622 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1623
1624 if (hse == NULL)
1625 return -1;
1626
Jiri Olsa07600022016-01-18 10:24:16 +01001627 perf_hpp_list__column_register(list, &hse->hpp);
Namhyung Kima7d945b2014-03-04 10:46:34 +09001628 return 0;
1629}
1630
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001631struct hpp_dynamic_entry {
1632 struct perf_hpp_fmt hpp;
1633 struct perf_evsel *evsel;
1634 struct format_field *field;
1635 unsigned dynamic_len;
Namhyung Kim053a3982015-12-23 02:07:05 +09001636 bool raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001637};
1638
1639static int hde_width(struct hpp_dynamic_entry *hde)
1640{
1641 if (!hde->hpp.len) {
1642 int len = hde->dynamic_len;
1643 int namelen = strlen(hde->field->name);
1644 int fieldlen = hde->field->size;
1645
1646 if (namelen > len)
1647 len = namelen;
1648
1649 if (!(hde->field->flags & FIELD_IS_STRING)) {
1650 /* length needed to print the value as a hex number */
1651 fieldlen = hde->field->size * 2 + 2;
1652 }
1653 if (fieldlen > len)
1654 len = fieldlen;
1655
1656 hde->hpp.len = len;
1657 }
1658 return hde->hpp.len;
1659}
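/*
 * E.g. a fixed 8-byte numeric field printed as hex needs "0x" plus 16
 * digits, which is where the "size * 2 + 2" above comes from; a field
 * named, say, "target_cpu" of that size would get a starting column
 * width of max(strlen("target_cpu"), 18) = 18.
 */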
1660
Namhyung Kim60517d22015-12-23 02:07:03 +09001661static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1662 struct hist_entry *he)
1663{
1664 char *str, *pos;
1665 struct format_field *field = hde->field;
1666 size_t namelen;
1667 bool last = false;
1668
Namhyung Kim053a3982015-12-23 02:07:05 +09001669 if (hde->raw_trace)
1670 return;
1671
Namhyung Kim60517d22015-12-23 02:07:03 +09001672 /* parse the pretty-printed result and update the max length */
1673 if (!he->trace_output)
1674 he->trace_output = get_trace_output(he);
1675
1676 namelen = strlen(field->name);
1677 str = he->trace_output;
1678
1679 while (str) {
1680 pos = strchr(str, ' ');
1681 if (pos == NULL) {
1682 last = true;
1683 pos = str + strlen(str);
1684 }
1685
1686 if (!strncmp(str, field->name, namelen)) {
1687 size_t len;
1688
1689 str += namelen + 1;
1690 len = pos - str;
1691
1692 if (len > hde->dynamic_len)
1693 hde->dynamic_len = len;
1694 break;
1695 }
1696
1697 if (last)
1698 str = NULL;
1699 else
1700 str = pos + 1;
1701 }
1702}
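/*
 * The scan above assumes the pretty-printed output is a list of space
 * separated "name=value" tokens, e.g. with
 *
 *	trace_output = "prev_comm=swapper/0 prev_pid=0 next_pid=137"
 *
 * a lookup of the "prev_pid" field skips the name plus the '=' and
 * measures the one-character value "0".
 */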
1703
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001704static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1705 struct perf_evsel *evsel __maybe_unused)
1706{
1707 struct hpp_dynamic_entry *hde;
1708 size_t len = fmt->user_len;
1709
1710 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1711
1712 if (!len)
1713 len = hde_width(hde);
1714
1715 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1716}
1717
1718static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1719 struct perf_hpp *hpp __maybe_unused,
1720 struct perf_evsel *evsel __maybe_unused)
1721{
1722 struct hpp_dynamic_entry *hde;
1723 size_t len = fmt->user_len;
1724
1725 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1726
1727 if (!len)
1728 len = hde_width(hde);
1729
1730 return len;
1731}
1732
Namhyung Kim361459f2015-12-23 02:07:08 +09001733bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1734{
1735 struct hpp_dynamic_entry *hde;
1736
1737 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1738
1739 return hists_to_evsel(hists) == hde->evsel;
1740}
1741
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001742static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1743 struct hist_entry *he)
1744{
1745 struct hpp_dynamic_entry *hde;
1746 size_t len = fmt->user_len;
Namhyung Kim60517d22015-12-23 02:07:03 +09001747 char *str, *pos;
1748 struct format_field *field;
1749 size_t namelen;
1750 bool last = false;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001751 int ret;
1752
1753 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1754
1755 if (!len)
1756 len = hde_width(hde);
1757
Namhyung Kim053a3982015-12-23 02:07:05 +09001758 if (hde->raw_trace)
1759 goto raw_field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001760
Namhyung Kim053a3982015-12-23 02:07:05 +09001761 field = hde->field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001762 namelen = strlen(field->name);
1763 str = he->trace_output;
1764
1765 while (str) {
1766 pos = strchr(str, ' ');
1767 if (pos == NULL) {
1768 last = true;
1769 pos = str + strlen(str);
1770 }
1771
1772 if (!strncmp(str, field->name, namelen)) {
1773 str += namelen + 1;
1774 str = strndup(str, pos - str);
1775
1776 if (str == NULL)
1777 return scnprintf(hpp->buf, hpp->size,
1778 "%*.*s", len, len, "ERROR");
1779 break;
1780 }
1781
1782 if (last)
1783 str = NULL;
1784 else
1785 str = pos + 1;
1786 }
1787
1788 if (str == NULL) {
1789 struct trace_seq seq;
Namhyung Kim053a3982015-12-23 02:07:05 +09001790raw_field:
Namhyung Kim60517d22015-12-23 02:07:03 +09001791 trace_seq_init(&seq);
1792 pevent_print_field(&seq, he->raw_data, hde->field);
1793 str = seq.buffer;
1794 }
1795
1796 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1797 free(str);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001798 return ret;
1799}
1800
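/*
 * Compare two entries by the raw value of the tracepoint field.  For
 * FIELD_IS_DYNAMIC (__data_loc) fields the record holds a 32-bit
 * descriptor instead of the data itself: the low 16 bits are the
 * offset of the payload within the record and the high 16 bits are
 * its length, which is what the unpacking below relies on.
 */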
1801static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1802 struct hist_entry *a, struct hist_entry *b)
1803{
1804 struct hpp_dynamic_entry *hde;
1805 struct format_field *field;
1806 unsigned offset, size;
1807
1808 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1809
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001810 field = hde->field;
1811 if (field->flags & FIELD_IS_DYNAMIC) {
1812 unsigned long long dyn;
1813
1814 pevent_read_number_field(field, a->raw_data, &dyn);
1815 offset = dyn & 0xffff;
1816 size = (dyn >> 16) & 0xffff;
1817
1818 /* record max width for output */
1819 if (size > hde->dynamic_len)
1820 hde->dynamic_len = size;
1821 } else {
1822 offset = field->offset;
1823 size = field->size;
Namhyung Kim60517d22015-12-23 02:07:03 +09001824
1825 update_dynamic_len(hde, a);
1826 update_dynamic_len(hde, b);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001827 }
1828
1829 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1830}
1831
Namhyung Kim361459f2015-12-23 02:07:08 +09001832bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1833{
1834 return fmt->cmp == __sort__hde_cmp;
1835}
1836
Jiri Olsa564132f2016-01-18 10:24:09 +01001837static void hde_free(struct perf_hpp_fmt *fmt)
1838{
1839 struct hpp_dynamic_entry *hde;
1840
1841 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1842 free(hde);
1843}
1844
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001845static struct hpp_dynamic_entry *
1846__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1847{
1848 struct hpp_dynamic_entry *hde;
1849
1850 hde = malloc(sizeof(*hde));
1851 if (hde == NULL) {
1852 pr_debug("Memory allocation failed\n");
1853 return NULL;
1854 }
1855
1856 hde->evsel = evsel;
1857 hde->field = field;
1858 hde->dynamic_len = 0;
1859
1860 hde->hpp.name = field->name;
1861 hde->hpp.header = __sort__hde_header;
1862 hde->hpp.width = __sort__hde_width;
1863 hde->hpp.entry = __sort__hde_entry;
1864 hde->hpp.color = NULL;
1865
1866 hde->hpp.cmp = __sort__hde_cmp;
1867 hde->hpp.collapse = __sort__hde_cmp;
1868 hde->hpp.sort = __sort__hde_cmp;
Jiri Olsa564132f2016-01-18 10:24:09 +01001869 hde->hpp.free = hde_free;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001870
1871 INIT_LIST_HEAD(&hde->hpp.list);
1872 INIT_LIST_HEAD(&hde->hpp.sort_list);
1873 hde->hpp.elide = false;
1874 hde->hpp.len = 0;
1875 hde->hpp.user_len = 0;
1876
1877 return hde;
1878}
1879
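/*
 * Split a --sort token of the form [<event>.]<field>[/<option>] in
 * place, e.g.:
 *
 *	"sched:sched_switch.next_pid"	-> event "sched:sched_switch",
 *					   field "next_pid", no option
 *	"next_pid/raw"			-> no event, field "next_pid",
 *					   option "raw"
 */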
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001880static int parse_field_name(char *str, char **event, char **field, char **opt)
1881{
1882 char *event_name, *field_name, *opt_name;
1883
1884 event_name = str;
1885 field_name = strchr(str, '.');
1886
1887 if (field_name) {
1888 *field_name++ = '\0';
1889 } else {
1890 event_name = NULL;
1891 field_name = str;
1892 }
1893
1894 opt_name = strchr(field_name, '/');
1895 if (opt_name)
1896 *opt_name++ = '\0';
1897
1898 *event = event_name;
1899 *field = field_name;
1900 *opt = opt_name;
1901
1902 return 0;
1903}
1904
1905/* find a matching evsel using a given event name. The event name can be:
Namhyung Kim9735be22016-01-05 19:58:35 +09001906 * 1. '%' + event index (e.g. '%1' for first event)
1907 * 2. full event name (e.g. sched:sched_switch)
1908 * 3. partial event name (should not contain ':')
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001909 */
1910static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1911{
1912 struct perf_evsel *evsel = NULL;
1913 struct perf_evsel *pos;
1914 bool full_name;
1915
1916 /* case 1 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001917 if (event_name[0] == '%') {
1918 int nr = strtol(event_name+1, NULL, 0);
1919
1920 if (nr > evlist->nr_entries)
1921 return NULL;
1922
1923 evsel = perf_evlist__first(evlist);
1924 while (--nr > 0)
1925 evsel = perf_evsel__next(evsel);
1926
1927 return evsel;
1928 }
1929
1930 full_name = !!strchr(event_name, ':');
1931 evlist__for_each(evlist, pos) {
Namhyung Kim9735be22016-01-05 19:58:35 +09001932 /* case 2 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001933 if (full_name && !strcmp(pos->name, event_name))
1934 return pos;
Namhyung Kim9735be22016-01-05 19:58:35 +09001935 /* case 3 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001936 if (!full_name && strstr(pos->name, event_name)) {
1937 if (evsel) {
1938 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1939 event_name, evsel->name, pos->name);
1940 return NULL;
1941 }
1942 evsel = pos;
1943 }
1944 }
1945
1946 return evsel;
1947}
1948
Namhyung Kim3b099bf52015-12-23 02:07:07 +09001949static int __dynamic_dimension__add(struct perf_evsel *evsel,
1950 struct format_field *field,
1951 bool raw_trace)
1952{
1953 struct hpp_dynamic_entry *hde;
1954
1955 hde = __alloc_dynamic_entry(evsel, field);
1956 if (hde == NULL)
1957 return -ENOMEM;
1958
1959 hde->raw_trace = raw_trace;
1960
1961 perf_hpp__register_sort_field(&hde->hpp);
1962 return 0;
1963}
1964
Namhyung Kim2e422fd2015-12-23 02:07:09 +09001965static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1966{
1967 int ret;
1968 struct format_field *field;
1969
1970 field = evsel->tp_format->format.fields;
1971 while (field) {
1972 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1973 if (ret < 0)
1974 return ret;
1975
1976 field = field->next;
1977 }
1978 return 0;
1979}
1980
1981static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1982{
1983 int ret;
1984 struct perf_evsel *evsel;
1985
1986 evlist__for_each(evlist, evsel) {
1987 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1988 continue;
1989
1990 ret = add_evsel_fields(evsel, raw_trace);
1991 if (ret < 0)
1992 return ret;
1993 }
1994 return 0;
1995}
1996
Namhyung Kim9735be22016-01-05 19:58:35 +09001997static int add_all_matching_fields(struct perf_evlist *evlist,
1998 char *field_name, bool raw_trace)
1999{
2000 int ret = -ESRCH;
2001 struct perf_evsel *evsel;
2002 struct format_field *field;
2003
2004 evlist__for_each(evlist, evsel) {
2005 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2006 continue;
2007
2008 field = pevent_find_any_field(evsel->tp_format, field_name);
2009 if (field == NULL)
2010 continue;
2011
2012 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2013 if (ret < 0)
2014 break;
2015 }
2016 return ret;
2017}
2018
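/*
 * Add dynamic (tracepoint field) sort keys.  Going by the parsing
 * below, the --sort token can be:
 *
 *	trace_fields		every field of every tracepoint event
 *	<field>			that field from each event that has it
 *	<event>.<field>		one field of one event
 *	<event>.*		every field of one event
 *
 * with an optional "/raw" suffix to skip the pretty printing.
 */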
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002019static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
2020{
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002021 char *str, *event_name, *field_name, *opt_name;
2022 struct perf_evsel *evsel;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002023 struct format_field *field;
Namhyung Kim053a3982015-12-23 02:07:05 +09002024 bool raw_trace = symbol_conf.raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002025 int ret = 0;
2026
2027 if (evlist == NULL)
2028 return -ENOENT;
2029
2030 str = strdup(tok);
2031 if (str == NULL)
2032 return -ENOMEM;
2033
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002034 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002035 ret = -EINVAL;
2036 goto out;
2037 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002038
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002039 if (opt_name) {
2040 if (strcmp(opt_name, "raw")) {
2041 pr_debug("unsupported field option %s\n", opt_name);
Namhyung Kim053a3982015-12-23 02:07:05 +09002042 ret = -EINVAL;
2043 goto out;
2044 }
2045 raw_trace = true;
2046 }
2047
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002048 if (!strcmp(field_name, "trace_fields")) {
2049 ret = add_all_dynamic_fields(evlist, raw_trace);
2050 goto out;
2051 }
2052
Namhyung Kim9735be22016-01-05 19:58:35 +09002053 if (event_name == NULL) {
2054 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2055 goto out;
2056 }
2057
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002058 evsel = find_evsel(evlist, event_name);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002059 if (evsel == NULL) {
2060 pr_debug("Cannot find event: %s\n", event_name);
2061 ret = -ENOENT;
2062 goto out;
2063 }
2064
2065 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2066 pr_debug("%s is not a tracepoint event\n", event_name);
2067 ret = -EINVAL;
2068 goto out;
2069 }
2070
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002071 if (!strcmp(field_name, "*")) {
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002072 ret = add_evsel_fields(evsel, raw_trace);
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002073 } else {
2074 field = pevent_find_any_field(evsel->tp_format, field_name);
2075 if (field == NULL) {
2076 pr_debug("Cannot find event field for %s.%s\n",
2077 event_name, field_name);
2078 ret = -ENOENT;
 goto out;
2079 }
2080
2081 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2082 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002083
2084out:
2085 free(str);
2086 return ret;
2087}
2088
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002089static int __sort_dimension__add(struct sort_dimension *sd)
Namhyung Kim2f532d02013-04-03 21:26:10 +09002090{
2091 if (sd->taken)
Namhyung Kim8b536992014-03-03 11:46:55 +09002092 return 0;
2093
Namhyung Kima7d945b2014-03-04 10:46:34 +09002094 if (__sort_dimension__add_hpp_sort(sd) < 0)
Namhyung Kim8b536992014-03-03 11:46:55 +09002095 return -1;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002096
2097 if (sd->entry->se_collapse)
2098 sort__need_collapse = 1;
2099
Namhyung Kim2f532d02013-04-03 21:26:10 +09002100 sd->taken = 1;
Namhyung Kim8b536992014-03-03 11:46:55 +09002101
2102 return 0;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002103}
2104
Namhyung Kima2ce0672014-03-04 09:06:42 +09002105static int __hpp_dimension__add(struct hpp_dimension *hd)
2106{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002107 struct perf_hpp_fmt *fmt;
Namhyung Kima2ce0672014-03-04 09:06:42 +09002108
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002109 if (hd->taken)
2110 return 0;
2111
2112 fmt = __hpp_dimension__alloc_hpp(hd);
2113 if (!fmt)
2114 return -1;
2115
2116 hd->taken = 1;
2117 perf_hpp__register_sort_field(fmt);
Namhyung Kima2ce0672014-03-04 09:06:42 +09002118 return 0;
2119}
2120
Jiri Olsa07600022016-01-18 10:24:16 +01002121static int __sort_dimension__add_output(struct perf_hpp_list *list,
2122 struct sort_dimension *sd)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002123{
2124 if (sd->taken)
2125 return 0;
2126
Jiri Olsa07600022016-01-18 10:24:16 +01002127 if (__sort_dimension__add_hpp_output(list, sd) < 0)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002128 return -1;
2129
2130 sd->taken = 1;
2131 return 0;
2132}
2133
Jiri Olsa07600022016-01-18 10:24:16 +01002134static int __hpp_dimension__add_output(struct perf_hpp_list *list,
2135 struct hpp_dimension *hd)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002136{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002137 struct perf_hpp_fmt *fmt;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002138
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002139 if (hd->taken)
2140 return 0;
2141
2142 fmt = __hpp_dimension__alloc_hpp(hd);
2143 if (!fmt)
2144 return -1;
2145
2146 hd->taken = 1;
Jiri Olsa07600022016-01-18 10:24:16 +01002147 perf_hpp_list__column_register(list, fmt);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002148 return 0;
2149}
2150
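/*
 * 'col' is a PERF_HPP__* index, so this relies on hpp_sort_dimensions[]
 * above being declared in the same order as that enum.
 */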
Jiri Olsabeeaaeb2015-10-06 14:25:11 +02002151int hpp_dimension__add_output(unsigned col)
2152{
2153 BUG_ON(col >= PERF_HPP__MAX_INDEX);
Jiri Olsa07600022016-01-18 10:24:16 +01002154 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
Jiri Olsabeeaaeb2015-10-06 14:25:11 +02002155}
2156
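/*
 * Resolve a single --sort token: common keys first, then the generic
 * hpp columns, then the branch-stack and memory-mode-only keys (which
 * are rejected outside their mode), and finally tracepoint fields via
 * add_dynamic_entry().
 */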
Namhyung Kim40184c42015-12-23 02:07:01 +09002157static int sort_dimension__add(const char *tok,
2158 struct perf_evlist *evlist __maybe_unused)
John Kacurdd68ada2009-09-24 18:02:49 +02002159{
2160 unsigned int i;
2161
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002162 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2163 struct sort_dimension *sd = &common_sort_dimensions[i];
John Kacurdd68ada2009-09-24 18:02:49 +02002164
John Kacurdd68ada2009-09-24 18:02:49 +02002165 if (strncasecmp(tok, sd->name, strlen(tok)))
2166 continue;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002167
John Kacurdd68ada2009-09-24 18:02:49 +02002168 if (sd->entry == &sort_parent) {
2169 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2170 if (ret) {
2171 char err[BUFSIZ];
2172
2173 regerror(ret, &parent_regex, err, sizeof(err));
Arnaldo Carvalho de Melo2aefa4f2010-04-02 12:30:57 -03002174 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2175 return -EINVAL;
John Kacurdd68ada2009-09-24 18:02:49 +02002176 }
2177 sort__has_parent = 1;
Namhyung Kim930477b2013-04-05 10:26:36 +09002178 } else if (sd->entry == &sort_sym) {
Namhyung Kim1af556402012-09-14 17:35:27 +09002179 sort__has_sym = 1;
Kan Liang94ba4622015-02-09 05:39:44 +00002180 /*
2181 * perf diff displays the performance difference amongst
2182 * two or more perf.data files. Those files could come
2183 * from different binaries. So we should not compare
2184 * their IPs, but the symbol names.
2185 */
2186 if (sort__mode == SORT_MODE__DIFF)
2187 sd->entry->se_collapse = sort__sym_sort;
2188
Namhyung Kim68f6d022013-12-18 14:21:10 +09002189 } else if (sd->entry == &sort_dso) {
2190 sort__has_dso = 1;
Kan Liang2e7ea3a2015-09-04 10:45:43 -04002191 } else if (sd->entry == &sort_socket) {
2192 sort__has_socket = 1;
Namhyung Kimcfd92da2016-01-21 19:13:24 -03002193 } else if (sd->entry == &sort_thread) {
2194 sort__has_thread = 1;
John Kacurdd68ada2009-09-24 18:02:49 +02002195 }
2196
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002197 return __sort_dimension__add(sd);
John Kacurdd68ada2009-09-24 18:02:49 +02002198 }
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002199
Namhyung Kima2ce0672014-03-04 09:06:42 +09002200 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2201 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2202
2203 if (strncasecmp(tok, hd->name, strlen(tok)))
2204 continue;
2205
2206 return __hpp_dimension__add(hd);
2207 }
2208
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002209 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2210 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2211
2212 if (strncasecmp(tok, sd->name, strlen(tok)))
2213 continue;
2214
Namhyung Kim55369fc2013-04-01 20:35:20 +09002215 if (sort__mode != SORT_MODE__BRANCH)
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002216 return -EINVAL;
2217
2218 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2219 sort__has_sym = 1;
2220
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002221 __sort_dimension__add(sd);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002222 return 0;
2223 }
2224
Namhyung Kimafab87b2013-04-03 21:26:11 +09002225 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2226 struct sort_dimension *sd = &memory_sort_dimensions[i];
2227
2228 if (strncasecmp(tok, sd->name, strlen(tok)))
2229 continue;
2230
2231 if (sort__mode != SORT_MODE__MEMORY)
2232 return -EINVAL;
2233
2234 if (sd->entry == &sort_mem_daddr_sym)
2235 sort__has_sym = 1;
2236
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002237 __sort_dimension__add(sd);
Namhyung Kimafab87b2013-04-03 21:26:11 +09002238 return 0;
2239 }
2240
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002241 if (!add_dynamic_entry(evlist, tok))
2242 return 0;
2243
John Kacurdd68ada2009-09-24 18:02:49 +02002244 return -ESRCH;
2245}
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002246
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002247static int setup_sort_list(char *str, struct perf_evlist *evlist)
2248{
2249 char *tmp, *tok;
2250 int ret = 0;
2251
2252 for (tok = strtok_r(str, ", ", &tmp);
2253 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2254 ret = sort_dimension__add(tok, evlist);
2255 if (ret == -EINVAL) {
2256 error("Invalid --sort key: `%s'", tok);
2257 break;
2258 } else if (ret == -ESRCH) {
2259 error("Unknown --sort key: `%s'", tok);
2260 break;
2261 }
2262 }
2263
2264 return ret;
2265}
2266
Namhyung Kimd49dade2015-12-23 02:07:10 +09002267static const char *get_default_sort_order(struct perf_evlist *evlist)
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002268{
2269 const char *default_sort_orders[] = {
2270 default_sort_order,
2271 default_branch_sort_order,
2272 default_mem_sort_order,
2273 default_top_sort_order,
2274 default_diff_sort_order,
Namhyung Kimd49dade2015-12-23 02:07:10 +09002275 default_tracepoint_sort_order,
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002276 };
Namhyung Kimd49dade2015-12-23 02:07:10 +09002277 bool use_trace = true;
2278 struct perf_evsel *evsel;
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002279
2280 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2281
Namhyung Kimd49dade2015-12-23 02:07:10 +09002282 if (evlist == NULL)
2283 goto out_no_evlist;
2284
2285 evlist__for_each(evlist, evsel) {
2286 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2287 use_trace = false;
2288 break;
2289 }
2290 }
2291
2292 if (use_trace) {
2293 sort__mode = SORT_MODE__TRACEPOINT;
2294 if (symbol_conf.raw_trace)
2295 return "trace_fields";
2296 }
2297out_no_evlist:
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002298 return default_sort_orders[sort__mode];
2299}
2300
Namhyung Kimd49dade2015-12-23 02:07:10 +09002301static int setup_sort_order(struct perf_evlist *evlist)
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002302{
2303 char *new_sort_order;
2304
2305 /*
2306 * Append '+'-prefixed sort order to the default sort
2307 * order string.
2308 */
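	/*
	 * E.g. with the normal default order, "--sort +period" becomes
	 * "comm,dso,symbol,period" below.
	 */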
2309 if (!sort_order || is_strict_order(sort_order))
2310 return 0;
2311
2312 if (sort_order[1] == '\0') {
2313 error("Invalid --sort key: `+'");
2314 return -EINVAL;
2315 }
2316
2317 /*
2318 * We allocate a new sort_order string, but we never free it,
2319 * because the pointer is referenced throughout the rest of the code.
2320 */
2321 if (asprintf(&new_sort_order, "%s,%s",
Namhyung Kimd49dade2015-12-23 02:07:10 +09002322 get_default_sort_order(evlist), sort_order + 1) < 0) {
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002323 error("Not enough memory to set up --sort");
2324 return -ENOMEM;
2325 }
2326
2327 sort_order = new_sort_order;
2328 return 0;
2329}
2330
Jiri Olsab97511c2016-01-07 10:14:08 +01002331/*
2332 * Adds a 'pre,' prefix to 'str' if 'pre' is
2333 * not already part of 'str'.
2334 */
2335static char *prefix_if_not_in(const char *pre, char *str)
2336{
2337 char *n;
2338
2339 if (!str || strstr(str, pre))
2340 return str;
2341
2342 if (asprintf(&n, "%s,%s", pre, str) < 0)
2343 return NULL;
2344
2345 free(str);
2346 return n;
2347}
2348
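/*
 * Prepend the overhead column(s) so that a plain "--sort comm,dso"
 * still ends up as "overhead,comm,dso", with "overhead_children" put
 * in front as well when --children accumulation is in effect.
 */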
2349static char *setup_overhead(char *keys)
2350{
2351 keys = prefix_if_not_in("overhead", keys);
2352
2353 if (symbol_conf.cumulate_callchain)
2354 keys = prefix_if_not_in("overhead_children", keys);
2355
2356 return keys;
2357}
2358
Namhyung Kim40184c42015-12-23 02:07:01 +09002359static int __setup_sorting(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002360{
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002361 char *str;
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002362 const char *sort_keys;
Namhyung Kim55309982013-02-06 14:57:16 +09002363 int ret = 0;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002364
Namhyung Kimd49dade2015-12-23 02:07:10 +09002365 ret = setup_sort_order(evlist);
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002366 if (ret)
2367 return ret;
2368
2369 sort_keys = sort_order;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002370 if (sort_keys == NULL) {
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002371 if (is_strict_order(field_order)) {
Namhyung Kima7d945b2014-03-04 10:46:34 +09002372 /*
2373 * If user specified field order but no sort order,
2374 * we'll honor it and not add default sort orders.
2375 */
2376 return 0;
2377 }
2378
Namhyung Kimd49dade2015-12-23 02:07:10 +09002379 sort_keys = get_default_sort_order(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002380 }
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002381
2382 str = strdup(sort_keys);
Namhyung Kim5936f542013-02-06 14:57:17 +09002383 if (str == NULL) {
2384 error("Not enough memory to setup sort keys");
2385 return -ENOMEM;
2386 }
2387
Jiri Olsab97511c2016-01-07 10:14:08 +01002388 /*
2389 * Prepend overhead fields for backward compatibility.
2390 */
2391 if (!is_strict_order(field_order)) {
2392 str = setup_overhead(str);
2393 if (str == NULL) {
2394 error("Not enough memory to setup overhead keys");
2395 return -ENOMEM;
2396 }
2397 }
2398
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002399 ret = setup_sort_list(str, evlist);
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002400
2401 free(str);
Namhyung Kim55309982013-02-06 14:57:16 +09002402 return ret;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002403}
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002404
Jiri Olsaf2998422014-05-23 17:15:47 +02002405void perf_hpp__set_elide(int idx, bool elide)
Namhyung Kime67d49a2014-03-18 13:00:59 +09002406{
Jiri Olsaf2998422014-05-23 17:15:47 +02002407 struct perf_hpp_fmt *fmt;
2408 struct hpp_sort_entry *hse;
Namhyung Kime67d49a2014-03-18 13:00:59 +09002409
Jiri Olsacf094042016-01-18 10:24:17 +01002410 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
Jiri Olsaf2998422014-05-23 17:15:47 +02002411 if (!perf_hpp__is_sort_entry(fmt))
2412 continue;
2413
2414 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2415 if (hse->se->se_width_idx == idx) {
2416 fmt->elide = elide;
2417 break;
2418 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002419 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002420}
2421
Jiri Olsaf2998422014-05-23 17:15:47 +02002422static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002423{
2424 if (list && strlist__nr_entries(list) == 1) {
2425 if (fp != NULL)
2426 fprintf(fp, "# %s: %s\n", list_name,
2427 strlist__entry(list, 0)->s);
Jiri Olsaf2998422014-05-23 17:15:47 +02002428 return true;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002429 }
Jiri Olsaf2998422014-05-23 17:15:47 +02002430 return false;
2431}
2432
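/*
 * A column is elided when the corresponding filter list pins it to a
 * single value, e.g. "perf report --dsos=libfoo.so" leaves only one
 * possible DSO, so repeating that column on every line adds nothing.
 */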
2433static bool get_elide(int idx, FILE *output)
2434{
2435 switch (idx) {
2436 case HISTC_SYMBOL:
2437 return __get_elide(symbol_conf.sym_list, "symbol", output);
2438 case HISTC_DSO:
2439 return __get_elide(symbol_conf.dso_list, "dso", output);
2440 case HISTC_COMM:
2441 return __get_elide(symbol_conf.comm_list, "comm", output);
2442 default:
2443 break;
2444 }
2445
2446 if (sort__mode != SORT_MODE__BRANCH)
2447 return false;
2448
2449 switch (idx) {
2450 case HISTC_SYMBOL_FROM:
2451 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2452 case HISTC_SYMBOL_TO:
2453 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2454 case HISTC_DSO_FROM:
2455 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2456 case HISTC_DSO_TO:
2457 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2458 default:
2459 break;
2460 }
2461
2462 return false;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002463}
Namhyung Kim08e71542013-04-03 21:26:19 +09002464
2465void sort__setup_elide(FILE *output)
2466{
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002467 struct perf_hpp_fmt *fmt;
2468 struct hpp_sort_entry *hse;
Namhyung Kim7524f632013-11-08 17:53:42 +09002469
Jiri Olsacf094042016-01-18 10:24:17 +01002470 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
Jiri Olsaf2998422014-05-23 17:15:47 +02002471 if (!perf_hpp__is_sort_entry(fmt))
2472 continue;
Namhyung Kim08e71542013-04-03 21:26:19 +09002473
Jiri Olsaf2998422014-05-23 17:15:47 +02002474 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2475 fmt->elide = get_elide(hse->se->se_width_idx, output);
Namhyung Kim08e71542013-04-03 21:26:19 +09002476 }
2477
Namhyung Kim7524f632013-11-08 17:53:42 +09002478 /*
2479 * It makes no sense to elide all of the sort entries.
2480 * If that happens, revert them all so they show up again.
2481 */
Jiri Olsacf094042016-01-18 10:24:17 +01002482 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002483 if (!perf_hpp__is_sort_entry(fmt))
2484 continue;
2485
Jiri Olsaf2998422014-05-23 17:15:47 +02002486 if (!fmt->elide)
Namhyung Kim7524f632013-11-08 17:53:42 +09002487 return;
2488 }
2489
Jiri Olsacf094042016-01-18 10:24:17 +01002490 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002491 if (!perf_hpp__is_sort_entry(fmt))
2492 continue;
2493
Jiri Olsaf2998422014-05-23 17:15:47 +02002494 fmt->elide = false;
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002495 }
Namhyung Kim08e71542013-04-03 21:26:19 +09002496}
Namhyung Kima7d945b2014-03-04 10:46:34 +09002497
Jiri Olsa07600022016-01-18 10:24:16 +01002498static int output_field_add(struct perf_hpp_list *list, char *tok)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002499{
2500 unsigned int i;
2501
2502 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2503 struct sort_dimension *sd = &common_sort_dimensions[i];
2504
2505 if (strncasecmp(tok, sd->name, strlen(tok)))
2506 continue;
2507
Jiri Olsa07600022016-01-18 10:24:16 +01002508 return __sort_dimension__add_output(list, sd);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002509 }
2510
2511 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2512 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2513
2514 if (strncasecmp(tok, hd->name, strlen(tok)))
2515 continue;
2516
Jiri Olsa07600022016-01-18 10:24:16 +01002517 return __hpp_dimension__add_output(list, hd);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002518 }
2519
2520 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2521 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2522
2523 if (strncasecmp(tok, sd->name, strlen(tok)))
2524 continue;
2525
Jiri Olsa07600022016-01-18 10:24:16 +01002526 return __sort_dimension__add_output(list, sd);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002527 }
2528
2529 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2530 struct sort_dimension *sd = &memory_sort_dimensions[i];
2531
2532 if (strncasecmp(tok, sd->name, strlen(tok)))
2533 continue;
2534
Jiri Olsa07600022016-01-18 10:24:16 +01002535 return __sort_dimension__add_output(list, sd);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002536 }
2537
2538 return -ESRCH;
2539}
2540
Jiri Olsa07600022016-01-18 10:24:16 +01002541static int setup_output_list(struct perf_hpp_list *list, char *str)
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002542{
2543 char *tmp, *tok;
2544 int ret = 0;
2545
2546 for (tok = strtok_r(str, ", ", &tmp);
2547 tok; tok = strtok_r(NULL, ", ", &tmp)) {
Jiri Olsa07600022016-01-18 10:24:16 +01002548 ret = output_field_add(list, tok);
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002549 if (ret == -EINVAL) {
2550 error("Invalid --fields key: `%s'", tok);
2551 break;
2552 } else if (ret == -ESRCH) {
2553 error("Unknown --fields key: `%s'", tok);
2554 break;
2555 }
2556 }
2557
2558 return ret;
2559}
2560
Namhyung Kima7d945b2014-03-04 10:46:34 +09002561static void reset_dimensions(void)
2562{
2563 unsigned int i;
2564
2565 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2566 common_sort_dimensions[i].taken = 0;
2567
2568 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2569 hpp_sort_dimensions[i].taken = 0;
2570
2571 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2572 bstack_sort_dimensions[i].taken = 0;
2573
2574 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2575 memory_sort_dimensions[i].taken = 0;
2576}
2577
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002578bool is_strict_order(const char *order)
2579{
2580 return order && (*order != '+');
2581}
2582
Namhyung Kima7d945b2014-03-04 10:46:34 +09002583static int __setup_output_field(void)
2584{
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002585 char *str, *strp;
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002586 int ret = -EINVAL;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002587
2588 if (field_order == NULL)
2589 return 0;
2590
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002591 strp = str = strdup(field_order);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002592 if (str == NULL) {
2593 error("Not enough memory to setup output fields");
2594 return -ENOMEM;
2595 }
2596
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002597 if (!is_strict_order(field_order))
2598 strp++;
2599
2600 if (!strlen(strp)) {
2601 error("Invalid --fields key: `+'");
2602 goto out;
2603 }
2604
Jiri Olsa07600022016-01-18 10:24:16 +01002605 ret = setup_output_list(&perf_hpp_list, strp);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002606
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002607out:
Namhyung Kima7d945b2014-03-04 10:46:34 +09002608 free(str);
2609 return ret;
2610}
2611
Namhyung Kim40184c42015-12-23 02:07:01 +09002612int setup_sorting(struct perf_evlist *evlist)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002613{
2614 int err;
2615
Namhyung Kim40184c42015-12-23 02:07:01 +09002616 err = __setup_sorting(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002617 if (err < 0)
2618 return err;
2619
2620 if (parent_pattern != default_parent_pattern) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002621 err = sort_dimension__add("parent", evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002622 if (err < 0)
2623 return err;
2624 }
2625
2626 reset_dimensions();
2627
2628 /*
2629 * perf diff doesn't use default hpp output fields.
2630 */
2631 if (sort__mode != SORT_MODE__DIFF)
2632 perf_hpp__init();
2633
2634 err = __setup_output_field();
2635 if (err < 0)
2636 return err;
2637
2638 /* copy sort keys to output fields */
Jiri Olsa43e0a682016-01-18 10:24:21 +01002639 perf_hpp__setup_output_field(&perf_hpp_list);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002640 /* and then copy output fields to sort keys */
Jiri Olsa43e0a682016-01-18 10:24:21 +01002641 perf_hpp__append_sort_keys(&perf_hpp_list);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002642
2643 return 0;
2644}
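/*
 * A tool is expected to pick a mode and, optionally, an order before
 * calling in here, roughly:
 *
 *	sort__mode = SORT_MODE__BRANCH;
 *	sort_order = "comm,symbol_from,symbol_to";	(or NULL for the default)
 *	if (setup_sorting(session->evlist) < 0)
 *		return -1;
 *
 * reset_output_field() below undoes the setup when the keys have to be
 * parsed again.
 */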
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002645
2646void reset_output_field(void)
2647{
2648 sort__need_collapse = 0;
2649 sort__has_parent = 0;
2650 sort__has_sym = 0;
2651 sort__has_dso = 0;
2652
Namhyung Kimd69b2962014-05-23 10:59:01 +09002653 field_order = NULL;
2654 sort_order = NULL;
2655
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002656 reset_dimensions();
Jiri Olsa43e0a682016-01-18 10:24:21 +01002657 perf_hpp__reset_output_field(&perf_hpp_list);
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002658}