#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
int sort__has_socket = 0;
int sort__has_thread = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;


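/*
 * All column formatting below goes through repsep_snprintf(): it behaves
 * like snprintf() but, when a --field-separator is configured, replaces any
 * occurrence of the separator character in the formatted output with '.'
 * so the resulting line still splits cleanly on that separator.
 */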
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

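/*
 * Common symbol column formatter: with -v it prefixes the raw address and
 * the symtab origin character, then prints "[level] symbol" (symbol+offset
 * for data maps, or the raw address when the sample could not be resolved),
 * padding or truncating the result to the column width.
 */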
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline) {
		if (!left->ms.map)
			left->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = left->ms.map;
			left->srcline = get_srcline(map->dso,
					   map__rip_2objdump(map, left->ip),
						    left->ms.sym, true);
		}
	}
	if (!right->srcline) {
		if (!right->ms.map)
			right->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = right->ms.map;
			right->srcline = get_srcline(map->dso,
					     map__rip_2objdump(map, right->ip),
						     right->ms.sym, true);
		}
	}
	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile) {
		if (!left->ms.map)
			left->srcfile = no_srcfile;
		else
			left->srcfile = get_srcfile(left);
	}
	if (!right->srcfile) {
		if (!right->ms.map)
			right->srcfile = no_srcfile;
		else
			right->srcfile = get_srcfile(right);
	}
	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			      he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort trace */

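/*
 * Format the raw tracepoint payload of an entry via libtraceevent: either a
 * plain field dump (--raw-trace) or the event's pretty-printed output.  The
 * returned buffer comes from the trace_seq and is cached by the callers in
 * he->trace_output so it is only generated once per entry.
 */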
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

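/*
 * Branch flag sort keys.  sort__mispredict_cmp() only reports whether the
 * predicted/mispredicted flags of two entries differ, which is enough to
 * keep entries with the same prediction outcome grouped together.
 */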
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return repsep_snprintf(bf, size, "%-*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

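/*
 * data_src.mem_dtlb is a bitmask: decode every set level bit into its name,
 * joining multiple levels with " or ", while the hit/miss bits extracted at
 * the top are appended separately at the end.
 */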
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

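/*
 * Sort data addresses by cacheline.  Entries are grouped by cpumode first,
 * then by the identity of the backing map (maj/min/ino/ino_generation), by
 * pid for anonymous userspace mappings, and finally by the cacheline of the
 * resolved address (al_addr).
 */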
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

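/*
 * local_weight reports the average weight per event (accumulated weight
 * divided by the number of merged samples), while the global "weight" key
 * below sorts on the accumulated total.
 */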
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

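/*
 * Column width for the Transaction key: add up the flag names counted for
 * length (entries marked skip_for_len are excluded) plus room for the
 * ":XX " abort code suffix appended by the formatter below.
 */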
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

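/*
 * Tables mapping the --sort/--fields key names onto the sort_entry
 * descriptors above.  Separate tables cover the common keys, the branch
 * stack keys and the memory mode keys; the hpp table further down covers
 * the overhead/period output columns.
 */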
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

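/*
 * A sort_entry is exposed to the output code by wrapping it in a
 * perf_hpp_fmt: the __sort__hpp_* callbacks below recover the enclosing
 * hpp_sort_entry with container_of() and forward to the corresponding
 * se_* method.
 */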
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
Jiri Olsaf2998422014-05-23 17:15:47 +02001582 hse->hpp.elide = false;
Namhyung Kime0d66c72014-07-31 14:47:37 +09001583 hse->hpp.len = 0;
Namhyung Kim5b591662014-07-31 14:47:38 +09001584 hse->hpp.user_len = 0;
Namhyung Kim8b536992014-03-03 11:46:55 +09001585
Namhyung Kima7d945b2014-03-04 10:46:34 +09001586 return hse;
1587}
1588
Jiri Olsa564132f2016-01-18 10:24:09 +01001589static void hpp_free(struct perf_hpp_fmt *fmt)
1590{
1591 free(fmt);
1592}
1593
Jiri Olsa1945c3e2016-01-18 10:24:07 +01001594static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
1595{
1596 struct perf_hpp_fmt *fmt;
1597
1598 fmt = memdup(hd->fmt, sizeof(*fmt));
1599 if (fmt) {
1600 INIT_LIST_HEAD(&fmt->list);
1601 INIT_LIST_HEAD(&fmt->sort_list);
Jiri Olsa564132f2016-01-18 10:24:09 +01001602 fmt->free = hpp_free;
Jiri Olsa1945c3e2016-01-18 10:24:07 +01001603 }
1604
1605 return fmt;
1606}
1607
Namhyung Kima7d945b2014-03-04 10:46:34 +09001608static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1609{
1610 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1611
1612 if (hse == NULL)
1613 return -1;
1614
Namhyung Kim8b536992014-03-03 11:46:55 +09001615 perf_hpp__register_sort_field(&hse->hpp);
1616 return 0;
1617}
1618
Namhyung Kima7d945b2014-03-04 10:46:34 +09001619static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1620{
1621 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1622
1623 if (hse == NULL)
1624 return -1;
1625
1626 perf_hpp__column_register(&hse->hpp);
1627 return 0;
1628}
1629
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001630struct hpp_dynamic_entry {
1631 struct perf_hpp_fmt hpp;
1632 struct perf_evsel *evsel;
1633 struct format_field *field;
1634 unsigned dynamic_len;
Namhyung Kim053a3982015-12-23 02:07:05 +09001635 bool raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001636};
1637
1638static int hde_width(struct hpp_dynamic_entry *hde)
1639{
1640 if (!hde->hpp.len) {
1641 int len = hde->dynamic_len;
1642 int namelen = strlen(hde->field->name);
1643 int fieldlen = hde->field->size;
1644
1645 if (namelen > len)
1646 len = namelen;
1647
1648 if (!(hde->field->flags & FIELD_IS_STRING)) {
1649 			/* length needed to print the value as a hex number */
1650 fieldlen = hde->field->size * 2 + 2;
1651 }
1652 if (fieldlen > len)
1653 len = fieldlen;
1654
1655 hde->hpp.len = len;
1656 }
1657 return hde->hpp.len;
1658}
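/*
 * Worked example (hypothetical field, for illustration): a u64 field
 * named "addr" has ->size == 8, so the hex form needs 8 * 2 + 2 == 18
 * columns (two hex digits per byte plus, presumably, a "0x" prefix),
 * while strlen("addr") == 4; the column width therefore settles at 18
 * unless a longer pretty-printed value has raised dynamic_len further.
 */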
1659
Namhyung Kim60517d22015-12-23 02:07:03 +09001660static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1661 struct hist_entry *he)
1662{
1663 char *str, *pos;
1664 struct format_field *field = hde->field;
1665 size_t namelen;
1666 bool last = false;
1667
Namhyung Kim053a3982015-12-23 02:07:05 +09001668 if (hde->raw_trace)
1669 return;
1670
Namhyung Kim60517d22015-12-23 02:07:03 +09001671	/* parse the pretty-printed result and update the max length */
1672 if (!he->trace_output)
1673 he->trace_output = get_trace_output(he);
1674
1675 namelen = strlen(field->name);
1676 str = he->trace_output;
1677
1678 while (str) {
1679 pos = strchr(str, ' ');
1680 if (pos == NULL) {
1681 last = true;
1682 pos = str + strlen(str);
1683 }
1684
1685 if (!strncmp(str, field->name, namelen)) {
1686 size_t len;
1687
1688 str += namelen + 1;
1689 len = pos - str;
1690
1691 if (len > hde->dynamic_len)
1692 hde->dynamic_len = len;
1693 break;
1694 }
1695
1696 if (last)
1697 str = NULL;
1698 else
1699 str = pos + 1;
1700 }
1701}
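/*
 * Sketch of the walk above, assuming trace_output is in the usual
 * "name=value name=value ..." pretty-printed form (an assumption about
 * get_trace_output(), e.g. "prev_comm=swapper/0 prev_pid=0"): for the
 * field "prev_comm", the token following the name and one separator is
 * "swapper/0", so dynamic_len would grow to 9 if it was smaller.
 */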
1702
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001703static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1704 struct perf_evsel *evsel __maybe_unused)
1705{
1706 struct hpp_dynamic_entry *hde;
1707 size_t len = fmt->user_len;
1708
1709 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1710
1711 if (!len)
1712 len = hde_width(hde);
1713
1714 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1715}
1716
1717static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1718 struct perf_hpp *hpp __maybe_unused,
1719 struct perf_evsel *evsel __maybe_unused)
1720{
1721 struct hpp_dynamic_entry *hde;
1722 size_t len = fmt->user_len;
1723
1724 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1725
1726 if (!len)
1727 len = hde_width(hde);
1728
1729 return len;
1730}
1731
Namhyung Kim361459f2015-12-23 02:07:08 +09001732bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1733{
1734 struct hpp_dynamic_entry *hde;
1735
1736 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1737
1738 return hists_to_evsel(hists) == hde->evsel;
1739}
1740
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001741static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1742 struct hist_entry *he)
1743{
1744 struct hpp_dynamic_entry *hde;
1745 size_t len = fmt->user_len;
Namhyung Kim60517d22015-12-23 02:07:03 +09001746 char *str, *pos;
1747 struct format_field *field;
1748 size_t namelen;
1749 bool last = false;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001750 int ret;
1751
1752 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1753
1754 if (!len)
1755 len = hde_width(hde);
1756
Namhyung Kim053a3982015-12-23 02:07:05 +09001757 if (hde->raw_trace)
1758 goto raw_field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001759
Namhyung Kim053a3982015-12-23 02:07:05 +09001760 field = hde->field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001761 namelen = strlen(field->name);
1762 str = he->trace_output;
1763
1764 while (str) {
1765 pos = strchr(str, ' ');
1766 if (pos == NULL) {
1767 last = true;
1768 pos = str + strlen(str);
1769 }
1770
1771 if (!strncmp(str, field->name, namelen)) {
1772 str += namelen + 1;
1773 str = strndup(str, pos - str);
1774
1775 if (str == NULL)
1776 return scnprintf(hpp->buf, hpp->size,
1777 "%*.*s", len, len, "ERROR");
1778 break;
1779 }
1780
1781 if (last)
1782 str = NULL;
1783 else
1784 str = pos + 1;
1785 }
1786
1787 if (str == NULL) {
1788 struct trace_seq seq;
Namhyung Kim053a3982015-12-23 02:07:05 +09001789raw_field:
Namhyung Kim60517d22015-12-23 02:07:03 +09001790 trace_seq_init(&seq);
1791 pevent_print_field(&seq, he->raw_data, hde->field);
1792 str = seq.buffer;
1793 }
1794
1795 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1796 free(str);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001797 return ret;
1798}
1799
1800static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1801 struct hist_entry *a, struct hist_entry *b)
1802{
1803 struct hpp_dynamic_entry *hde;
1804 struct format_field *field;
1805 unsigned offset, size;
1806
1807 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1808
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001809 field = hde->field;
1810 if (field->flags & FIELD_IS_DYNAMIC) {
1811 unsigned long long dyn;
1812
1813 pevent_read_number_field(field, a->raw_data, &dyn);
1814 offset = dyn & 0xffff;
1815 size = (dyn >> 16) & 0xffff;
1816
1817 /* record max width for output */
1818 if (size > hde->dynamic_len)
1819 hde->dynamic_len = size;
1820 } else {
1821 offset = field->offset;
1822 size = field->size;
Namhyung Kim60517d22015-12-23 02:07:03 +09001823
1824 update_dynamic_len(hde, a);
1825 update_dynamic_len(hde, b);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001826 }
1827
1828 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1829}
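/*
 * Note on the FIELD_IS_DYNAMIC branch above: a dynamic ("__data_loc")
 * field stores a 32-bit descriptor in the record rather than the data
 * itself; the low 16 bits are the payload offset and the high 16 bits
 * its length, which is what the mask and shift unpack before the
 * memcmp() on raw_data.
 */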
1830
Namhyung Kim361459f2015-12-23 02:07:08 +09001831bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1832{
1833 return fmt->cmp == __sort__hde_cmp;
1834}
1835
Jiri Olsa564132f2016-01-18 10:24:09 +01001836static void hde_free(struct perf_hpp_fmt *fmt)
1837{
1838 struct hpp_dynamic_entry *hde;
1839
1840 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1841 free(hde);
1842}
1843
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001844static struct hpp_dynamic_entry *
1845__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1846{
1847 struct hpp_dynamic_entry *hde;
1848
1849 hde = malloc(sizeof(*hde));
1850 if (hde == NULL) {
1851 pr_debug("Memory allocation failed\n");
1852 return NULL;
1853 }
1854
1855 hde->evsel = evsel;
1856 hde->field = field;
1857 hde->dynamic_len = 0;
1858
1859 hde->hpp.name = field->name;
1860 hde->hpp.header = __sort__hde_header;
1861 hde->hpp.width = __sort__hde_width;
1862 hde->hpp.entry = __sort__hde_entry;
1863 hde->hpp.color = NULL;
1864
1865 hde->hpp.cmp = __sort__hde_cmp;
1866 hde->hpp.collapse = __sort__hde_cmp;
1867 hde->hpp.sort = __sort__hde_cmp;
Jiri Olsa564132f2016-01-18 10:24:09 +01001868 hde->hpp.free = hde_free;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001869
1870 INIT_LIST_HEAD(&hde->hpp.list);
1871 INIT_LIST_HEAD(&hde->hpp.sort_list);
1872 hde->hpp.elide = false;
1873 hde->hpp.len = 0;
1874 hde->hpp.user_len = 0;
1875
1876 return hde;
1877}
1878
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001879static int parse_field_name(char *str, char **event, char **field, char **opt)
1880{
1881 char *event_name, *field_name, *opt_name;
1882
1883 event_name = str;
1884 field_name = strchr(str, '.');
1885
1886 if (field_name) {
1887 *field_name++ = '\0';
1888 } else {
1889 event_name = NULL;
1890 field_name = str;
1891 }
1892
1893 opt_name = strchr(field_name, '/');
1894 if (opt_name)
1895 *opt_name++ = '\0';
1896
1897 *event = event_name;
1898 *field = field_name;
1899 *opt = opt_name;
1900
1901 return 0;
1902}
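/*
 * Hypothetical inputs and the resulting split (illustration only):
 *   "sched:sched_switch.next_pid/raw"
 *       -> event = "sched:sched_switch", field = "next_pid", opt = "raw"
 *   "next_pid"
 *       -> event = NULL, field = "next_pid", opt = NULL
 */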
1903
1904/* find the matching evsel using a given event name. The event name can be:
Namhyung Kim9735be22016-01-05 19:58:35 +09001905 * 1. '%' + event index (e.g. '%1' for first event)
1906 * 2. full event name (e.g. sched:sched_switch)
1907 * 3. partial event name (should not contain ':')
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001908 */
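/*
 * Illustrative event_name values for the three cases (hypothetical,
 * depending on what was recorded):
 *   "%2"                 - the second event in the evlist
 *   "sched:sched_switch" - exact match on the full event name
 *   "switch"             - partial match; ambiguity is rejected below
 */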
1909static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1910{
1911 struct perf_evsel *evsel = NULL;
1912 struct perf_evsel *pos;
1913 bool full_name;
1914
1915 /* case 1 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001916 if (event_name[0] == '%') {
1917 int nr = strtol(event_name+1, NULL, 0);
1918
 1919 		if (nr < 1 || nr > evlist->nr_entries)
1920 return NULL;
1921
1922 evsel = perf_evlist__first(evlist);
1923 while (--nr > 0)
1924 evsel = perf_evsel__next(evsel);
1925
1926 return evsel;
1927 }
1928
1929 full_name = !!strchr(event_name, ':');
1930 evlist__for_each(evlist, pos) {
Namhyung Kim9735be22016-01-05 19:58:35 +09001931 /* case 2 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001932 if (full_name && !strcmp(pos->name, event_name))
1933 return pos;
Namhyung Kim9735be22016-01-05 19:58:35 +09001934 /* case 3 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001935 if (!full_name && strstr(pos->name, event_name)) {
1936 if (evsel) {
1937 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1938 event_name, evsel->name, pos->name);
1939 return NULL;
1940 }
1941 evsel = pos;
1942 }
1943 }
1944
1945 return evsel;
1946}
1947
Namhyung Kim3b099bf52015-12-23 02:07:07 +09001948static int __dynamic_dimension__add(struct perf_evsel *evsel,
1949 struct format_field *field,
1950 bool raw_trace)
1951{
1952 struct hpp_dynamic_entry *hde;
1953
1954 hde = __alloc_dynamic_entry(evsel, field);
1955 if (hde == NULL)
1956 return -ENOMEM;
1957
1958 hde->raw_trace = raw_trace;
1959
1960 perf_hpp__register_sort_field(&hde->hpp);
1961 return 0;
1962}
1963
Namhyung Kim2e422fd2015-12-23 02:07:09 +09001964static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1965{
1966 int ret;
1967 struct format_field *field;
1968
1969 field = evsel->tp_format->format.fields;
1970 while (field) {
1971 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1972 if (ret < 0)
1973 return ret;
1974
1975 field = field->next;
1976 }
1977 return 0;
1978}
1979
1980static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1981{
1982 int ret;
1983 struct perf_evsel *evsel;
1984
1985 evlist__for_each(evlist, evsel) {
1986 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1987 continue;
1988
1989 ret = add_evsel_fields(evsel, raw_trace);
1990 if (ret < 0)
1991 return ret;
1992 }
1993 return 0;
1994}
1995
Namhyung Kim9735be22016-01-05 19:58:35 +09001996static int add_all_matching_fields(struct perf_evlist *evlist,
1997 char *field_name, bool raw_trace)
1998{
1999 int ret = -ESRCH;
2000 struct perf_evsel *evsel;
2001 struct format_field *field;
2002
2003 evlist__for_each(evlist, evsel) {
2004 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
2005 continue;
2006
2007 field = pevent_find_any_field(evsel->tp_format, field_name);
2008 if (field == NULL)
2009 continue;
2010
2011 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2012 if (ret < 0)
2013 break;
2014 }
2015 return ret;
2016}
2017
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002018static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
2019{
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002020 char *str, *event_name, *field_name, *opt_name;
2021 struct perf_evsel *evsel;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002022 struct format_field *field;
Namhyung Kim053a3982015-12-23 02:07:05 +09002023 bool raw_trace = symbol_conf.raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002024 int ret = 0;
2025
2026 if (evlist == NULL)
2027 return -ENOENT;
2028
2029 str = strdup(tok);
2030 if (str == NULL)
2031 return -ENOMEM;
2032
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002033 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002034 ret = -EINVAL;
2035 goto out;
2036 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002037
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002038 if (opt_name) {
2039 if (strcmp(opt_name, "raw")) {
2040 pr_debug("unsupported field option %s\n", opt_name);
Namhyung Kim053a3982015-12-23 02:07:05 +09002041 ret = -EINVAL;
2042 goto out;
2043 }
2044 raw_trace = true;
2045 }
2046
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002047 if (!strcmp(field_name, "trace_fields")) {
2048 ret = add_all_dynamic_fields(evlist, raw_trace);
2049 goto out;
2050 }
2051
Namhyung Kim9735be22016-01-05 19:58:35 +09002052 if (event_name == NULL) {
2053 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2054 goto out;
2055 }
2056
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002057 evsel = find_evsel(evlist, event_name);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002058 if (evsel == NULL) {
2059 pr_debug("Cannot find event: %s\n", event_name);
2060 ret = -ENOENT;
2061 goto out;
2062 }
2063
2064 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2065 pr_debug("%s is not a tracepoint event\n", event_name);
2066 ret = -EINVAL;
2067 goto out;
2068 }
2069
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002070 if (!strcmp(field_name, "*")) {
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002071 ret = add_evsel_fields(evsel, raw_trace);
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002072 } else {
2073 field = pevent_find_any_field(evsel->tp_format, field_name);
2074 if (field == NULL) {
2075 pr_debug("Cannot find event field for %s.%s\n",
2076 event_name, field_name);
 2077 			ret = -ENOENT;
			goto out;
2078 }
2079
2080 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2081 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002082
2083out:
2084 free(str);
2085 return ret;
2086}
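/*
 * Tokens accepted above, with hypothetical names for illustration:
 *   "trace_fields"          - every field of every tracepoint event
 *   "next_pid"              - that field from every tracepoint event that has it
 *   "sched:sched_switch.*"  - all fields of a single event
 *   "%1.next_pid/raw"       - one field of the first event, printed raw
 *                             (no pretty-printing)
 */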
2087
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002088static int __sort_dimension__add(struct sort_dimension *sd)
Namhyung Kim2f532d02013-04-03 21:26:10 +09002089{
2090 if (sd->taken)
Namhyung Kim8b536992014-03-03 11:46:55 +09002091 return 0;
2092
Namhyung Kima7d945b2014-03-04 10:46:34 +09002093 if (__sort_dimension__add_hpp_sort(sd) < 0)
Namhyung Kim8b536992014-03-03 11:46:55 +09002094 return -1;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002095
2096 if (sd->entry->se_collapse)
2097 sort__need_collapse = 1;
2098
Namhyung Kim2f532d02013-04-03 21:26:10 +09002099 sd->taken = 1;
Namhyung Kim8b536992014-03-03 11:46:55 +09002100
2101 return 0;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002102}
2103
Namhyung Kima2ce0672014-03-04 09:06:42 +09002104static int __hpp_dimension__add(struct hpp_dimension *hd)
2105{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002106 struct perf_hpp_fmt *fmt;
Namhyung Kima2ce0672014-03-04 09:06:42 +09002107
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002108 if (hd->taken)
2109 return 0;
2110
2111 fmt = __hpp_dimension__alloc_hpp(hd);
2112 if (!fmt)
2113 return -1;
2114
2115 hd->taken = 1;
2116 perf_hpp__register_sort_field(fmt);
Namhyung Kima2ce0672014-03-04 09:06:42 +09002117 return 0;
2118}
2119
Namhyung Kima7d945b2014-03-04 10:46:34 +09002120static int __sort_dimension__add_output(struct sort_dimension *sd)
2121{
2122 if (sd->taken)
2123 return 0;
2124
2125 if (__sort_dimension__add_hpp_output(sd) < 0)
2126 return -1;
2127
2128 sd->taken = 1;
2129 return 0;
2130}
2131
2132static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2133{
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002134 struct perf_hpp_fmt *fmt;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002135
Jiri Olsa1945c3e2016-01-18 10:24:07 +01002136 if (hd->taken)
2137 return 0;
2138
2139 fmt = __hpp_dimension__alloc_hpp(hd);
2140 if (!fmt)
2141 return -1;
2142
2143 hd->taken = 1;
2144 perf_hpp__column_register(fmt);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002145 return 0;
2146}
2147
Jiri Olsabeeaaeb2015-10-06 14:25:11 +02002148int hpp_dimension__add_output(unsigned col)
2149{
2150 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2151 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2152}
2153
Namhyung Kim40184c42015-12-23 02:07:01 +09002154static int sort_dimension__add(const char *tok,
2155 struct perf_evlist *evlist __maybe_unused)
John Kacurdd68ada2009-09-24 18:02:49 +02002156{
2157 unsigned int i;
2158
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002159 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2160 struct sort_dimension *sd = &common_sort_dimensions[i];
John Kacurdd68ada2009-09-24 18:02:49 +02002161
John Kacurdd68ada2009-09-24 18:02:49 +02002162 if (strncasecmp(tok, sd->name, strlen(tok)))
2163 continue;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002164
John Kacurdd68ada2009-09-24 18:02:49 +02002165 if (sd->entry == &sort_parent) {
2166 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2167 if (ret) {
2168 char err[BUFSIZ];
2169
2170 regerror(ret, &parent_regex, err, sizeof(err));
Arnaldo Carvalho de Melo2aefa4f2010-04-02 12:30:57 -03002171 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2172 return -EINVAL;
John Kacurdd68ada2009-09-24 18:02:49 +02002173 }
2174 sort__has_parent = 1;
Namhyung Kim930477b2013-04-05 10:26:36 +09002175 } else if (sd->entry == &sort_sym) {
Namhyung Kim1af556402012-09-14 17:35:27 +09002176 sort__has_sym = 1;
Kan Liang94ba4622015-02-09 05:39:44 +00002177 /*
 2178 			 * perf diff displays the performance difference between
 2179 			 * two or more perf.data files. Those files could come
 2180 			 * from different binaries, so we should not compare
 2181 			 * their IPs but rather the symbol names.
2182 */
2183 if (sort__mode == SORT_MODE__DIFF)
2184 sd->entry->se_collapse = sort__sym_sort;
2185
Namhyung Kim68f6d022013-12-18 14:21:10 +09002186 } else if (sd->entry == &sort_dso) {
2187 sort__has_dso = 1;
Kan Liang2e7ea3a2015-09-04 10:45:43 -04002188 } else if (sd->entry == &sort_socket) {
2189 sort__has_socket = 1;
Namhyung Kimcfd92da2016-01-21 19:13:24 -03002190 } else if (sd->entry == &sort_thread) {
2191 sort__has_thread = 1;
John Kacurdd68ada2009-09-24 18:02:49 +02002192 }
2193
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002194 return __sort_dimension__add(sd);
John Kacurdd68ada2009-09-24 18:02:49 +02002195 }
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002196
Namhyung Kima2ce0672014-03-04 09:06:42 +09002197 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2198 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2199
2200 if (strncasecmp(tok, hd->name, strlen(tok)))
2201 continue;
2202
2203 return __hpp_dimension__add(hd);
2204 }
2205
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002206 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2207 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2208
2209 if (strncasecmp(tok, sd->name, strlen(tok)))
2210 continue;
2211
Namhyung Kim55369fc2013-04-01 20:35:20 +09002212 if (sort__mode != SORT_MODE__BRANCH)
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002213 return -EINVAL;
2214
2215 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2216 sort__has_sym = 1;
2217
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002218 __sort_dimension__add(sd);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002219 return 0;
2220 }
2221
Namhyung Kimafab87b2013-04-03 21:26:11 +09002222 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2223 struct sort_dimension *sd = &memory_sort_dimensions[i];
2224
2225 if (strncasecmp(tok, sd->name, strlen(tok)))
2226 continue;
2227
2228 if (sort__mode != SORT_MODE__MEMORY)
2229 return -EINVAL;
2230
2231 if (sd->entry == &sort_mem_daddr_sym)
2232 sort__has_sym = 1;
2233
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002234 __sort_dimension__add(sd);
Namhyung Kimafab87b2013-04-03 21:26:11 +09002235 return 0;
2236 }
2237
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002238 if (!add_dynamic_entry(evlist, tok))
2239 return 0;
2240
John Kacurdd68ada2009-09-24 18:02:49 +02002241 return -ESRCH;
2242}
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002243
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002244static int setup_sort_list(char *str, struct perf_evlist *evlist)
2245{
2246 char *tmp, *tok;
2247 int ret = 0;
2248
2249 for (tok = strtok_r(str, ", ", &tmp);
2250 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2251 ret = sort_dimension__add(tok, evlist);
2252 if (ret == -EINVAL) {
2253 error("Invalid --sort key: `%s'", tok);
2254 break;
2255 } else if (ret == -ESRCH) {
2256 error("Unknown --sort key: `%s'", tok);
2257 break;
2258 }
2259 }
2260
2261 return ret;
2262}
2263
Namhyung Kimd49dade2015-12-23 02:07:10 +09002264static const char *get_default_sort_order(struct perf_evlist *evlist)
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002265{
2266 const char *default_sort_orders[] = {
2267 default_sort_order,
2268 default_branch_sort_order,
2269 default_mem_sort_order,
2270 default_top_sort_order,
2271 default_diff_sort_order,
Namhyung Kimd49dade2015-12-23 02:07:10 +09002272 default_tracepoint_sort_order,
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002273 };
Namhyung Kimd49dade2015-12-23 02:07:10 +09002274 bool use_trace = true;
2275 struct perf_evsel *evsel;
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002276
2277 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2278
Namhyung Kimd49dade2015-12-23 02:07:10 +09002279 if (evlist == NULL)
2280 goto out_no_evlist;
2281
2282 evlist__for_each(evlist, evsel) {
2283 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2284 use_trace = false;
2285 break;
2286 }
2287 }
2288
2289 if (use_trace) {
2290 sort__mode = SORT_MODE__TRACEPOINT;
2291 if (symbol_conf.raw_trace)
2292 return "trace_fields";
2293 }
2294out_no_evlist:
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002295 return default_sort_orders[sort__mode];
2296}
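/*
 * In effect (assuming a typical session): a perf.data containing only
 * tracepoint events falls back to "trace" (or "trace_fields" under
 * --raw-trace), while any other mix keeps the per-mode default order.
 */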
2297
Namhyung Kimd49dade2015-12-23 02:07:10 +09002298static int setup_sort_order(struct perf_evlist *evlist)
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002299{
2300 char *new_sort_order;
2301
2302 /*
2303 * Append '+'-prefixed sort order to the default sort
2304 * order string.
2305 */
2306 if (!sort_order || is_strict_order(sort_order))
2307 return 0;
2308
2309 if (sort_order[1] == '\0') {
2310 error("Invalid --sort key: `+'");
2311 return -EINVAL;
2312 }
2313
2314 /*
 2315 	 * We allocate a new sort_order string but never free it,
 2316 	 * because it is referenced throughout the rest of the code.
2317 */
2318 if (asprintf(&new_sort_order, "%s,%s",
Namhyung Kimd49dade2015-12-23 02:07:10 +09002319 get_default_sort_order(evlist), sort_order + 1) < 0) {
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002320 error("Not enough memory to set up --sort");
2321 return -ENOMEM;
2322 }
2323
2324 sort_order = new_sort_order;
2325 return 0;
2326}
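/*
 * Sketch of the '+' handling above: with a hypothetical "--sort +period",
 * sort_order is rebuilt as "<default keys for the mode>,period", i.e.
 * the user's keys are appended to the defaults rather than replacing them.
 */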
2327
Jiri Olsab97511c2016-01-07 10:14:08 +01002328/*
 2329 * Adds the 'pre,' prefix to 'str' if 'pre' is
2330 * not already part of 'str'.
2331 */
2332static char *prefix_if_not_in(const char *pre, char *str)
2333{
2334 char *n;
2335
2336 if (!str || strstr(str, pre))
2337 return str;
2338
2339 if (asprintf(&n, "%s,%s", pre, str) < 0)
2340 return NULL;
2341
2342 free(str);
2343 return n;
2344}
2345
2346static char *setup_overhead(char *keys)
2347{
2348 keys = prefix_if_not_in("overhead", keys);
2349
2350 if (symbol_conf.cumulate_callchain)
2351 keys = prefix_if_not_in("overhead_children", keys);
2352
2353 return keys;
2354}
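/*
 * Sketch with a hypothetical key string: "comm,dso" becomes
 * "overhead,comm,dso", and with cumulation enabled it becomes
 * "overhead_children,overhead,comm,dso", so the overhead columns stay
 * in front even when the user listed only other keys.
 */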
2355
Namhyung Kim40184c42015-12-23 02:07:01 +09002356static int __setup_sorting(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002357{
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002358 char *str;
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002359 const char *sort_keys;
Namhyung Kim55309982013-02-06 14:57:16 +09002360 int ret = 0;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002361
Namhyung Kimd49dade2015-12-23 02:07:10 +09002362 ret = setup_sort_order(evlist);
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002363 if (ret)
2364 return ret;
2365
2366 sort_keys = sort_order;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002367 if (sort_keys == NULL) {
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002368 if (is_strict_order(field_order)) {
Namhyung Kima7d945b2014-03-04 10:46:34 +09002369 /*
 2370 			 * If the user specified a field order but no sort order,
 2371 			 * honor it and do not add the default sort orders.
2372 */
2373 return 0;
2374 }
2375
Namhyung Kimd49dade2015-12-23 02:07:10 +09002376 sort_keys = get_default_sort_order(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002377 }
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002378
2379 str = strdup(sort_keys);
Namhyung Kim5936f542013-02-06 14:57:17 +09002380 if (str == NULL) {
2381 error("Not enough memory to setup sort keys");
2382 return -ENOMEM;
2383 }
2384
Jiri Olsab97511c2016-01-07 10:14:08 +01002385 /*
2386 * Prepend overhead fields for backward compatibility.
2387 */
2388 if (!is_strict_order(field_order)) {
2389 str = setup_overhead(str);
2390 if (str == NULL) {
2391 error("Not enough memory to setup overhead keys");
2392 return -ENOMEM;
2393 }
2394 }
2395
Jiri Olsa2fbaa392016-01-18 10:24:10 +01002396 ret = setup_sort_list(str, evlist);
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002397
2398 free(str);
Namhyung Kim55309982013-02-06 14:57:16 +09002399 return ret;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002400}
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002401
Jiri Olsaf2998422014-05-23 17:15:47 +02002402void perf_hpp__set_elide(int idx, bool elide)
Namhyung Kime67d49a2014-03-18 13:00:59 +09002403{
Jiri Olsaf2998422014-05-23 17:15:47 +02002404 struct perf_hpp_fmt *fmt;
2405 struct hpp_sort_entry *hse;
Namhyung Kime67d49a2014-03-18 13:00:59 +09002406
Jiri Olsaf2998422014-05-23 17:15:47 +02002407 perf_hpp__for_each_format(fmt) {
2408 if (!perf_hpp__is_sort_entry(fmt))
2409 continue;
2410
2411 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2412 if (hse->se->se_width_idx == idx) {
2413 fmt->elide = elide;
2414 break;
2415 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002416 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002417}
2418
Jiri Olsaf2998422014-05-23 17:15:47 +02002419static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002420{
2421 if (list && strlist__nr_entries(list) == 1) {
2422 if (fp != NULL)
2423 fprintf(fp, "# %s: %s\n", list_name,
2424 strlist__entry(list, 0)->s);
Jiri Olsaf2998422014-05-23 17:15:47 +02002425 return true;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002426 }
Jiri Olsaf2998422014-05-23 17:15:47 +02002427 return false;
2428}
2429
2430static bool get_elide(int idx, FILE *output)
2431{
2432 switch (idx) {
2433 case HISTC_SYMBOL:
2434 return __get_elide(symbol_conf.sym_list, "symbol", output);
2435 case HISTC_DSO:
2436 return __get_elide(symbol_conf.dso_list, "dso", output);
2437 case HISTC_COMM:
2438 return __get_elide(symbol_conf.comm_list, "comm", output);
2439 default:
2440 break;
2441 }
2442
2443 if (sort__mode != SORT_MODE__BRANCH)
2444 return false;
2445
2446 switch (idx) {
2447 case HISTC_SYMBOL_FROM:
2448 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2449 case HISTC_SYMBOL_TO:
2450 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2451 case HISTC_DSO_FROM:
2452 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2453 case HISTC_DSO_TO:
2454 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2455 default:
2456 break;
2457 }
2458
2459 return false;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002460}
Namhyung Kim08e71542013-04-03 21:26:19 +09002461
2462void sort__setup_elide(FILE *output)
2463{
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002464 struct perf_hpp_fmt *fmt;
2465 struct hpp_sort_entry *hse;
Namhyung Kim7524f632013-11-08 17:53:42 +09002466
Jiri Olsaf2998422014-05-23 17:15:47 +02002467 perf_hpp__for_each_format(fmt) {
2468 if (!perf_hpp__is_sort_entry(fmt))
2469 continue;
Namhyung Kim08e71542013-04-03 21:26:19 +09002470
Jiri Olsaf2998422014-05-23 17:15:47 +02002471 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2472 fmt->elide = get_elide(hse->se->se_width_idx, output);
Namhyung Kim08e71542013-04-03 21:26:19 +09002473 }
2474
Namhyung Kim7524f632013-11-08 17:53:42 +09002475 /*
 2476 	 * It makes no sense to elide all of the sort entries.
 2477 	 * Just revert them so they show up again.
2478 */
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002479 perf_hpp__for_each_format(fmt) {
2480 if (!perf_hpp__is_sort_entry(fmt))
2481 continue;
2482
Jiri Olsaf2998422014-05-23 17:15:47 +02002483 if (!fmt->elide)
Namhyung Kim7524f632013-11-08 17:53:42 +09002484 return;
2485 }
2486
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002487 perf_hpp__for_each_format(fmt) {
2488 if (!perf_hpp__is_sort_entry(fmt))
2489 continue;
2490
Jiri Olsaf2998422014-05-23 17:15:47 +02002491 fmt->elide = false;
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002492 }
Namhyung Kim08e71542013-04-03 21:26:19 +09002493}
Namhyung Kima7d945b2014-03-04 10:46:34 +09002494
2495static int output_field_add(char *tok)
2496{
2497 unsigned int i;
2498
2499 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2500 struct sort_dimension *sd = &common_sort_dimensions[i];
2501
2502 if (strncasecmp(tok, sd->name, strlen(tok)))
2503 continue;
2504
2505 return __sort_dimension__add_output(sd);
2506 }
2507
2508 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2509 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2510
2511 if (strncasecmp(tok, hd->name, strlen(tok)))
2512 continue;
2513
2514 return __hpp_dimension__add_output(hd);
2515 }
2516
2517 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2518 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2519
2520 if (strncasecmp(tok, sd->name, strlen(tok)))
2521 continue;
2522
2523 return __sort_dimension__add_output(sd);
2524 }
2525
2526 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2527 struct sort_dimension *sd = &memory_sort_dimensions[i];
2528
2529 if (strncasecmp(tok, sd->name, strlen(tok)))
2530 continue;
2531
2532 return __sort_dimension__add_output(sd);
2533 }
2534
2535 return -ESRCH;
2536}
2537
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002538static int setup_output_list(char *str)
2539{
2540 char *tmp, *tok;
2541 int ret = 0;
2542
2543 for (tok = strtok_r(str, ", ", &tmp);
2544 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2545 ret = output_field_add(tok);
2546 if (ret == -EINVAL) {
2547 error("Invalid --fields key: `%s'", tok);
2548 break;
2549 } else if (ret == -ESRCH) {
2550 error("Unknown --fields key: `%s'", tok);
2551 break;
2552 }
2553 }
2554
2555 return ret;
2556}
2557
Namhyung Kima7d945b2014-03-04 10:46:34 +09002558static void reset_dimensions(void)
2559{
2560 unsigned int i;
2561
2562 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2563 common_sort_dimensions[i].taken = 0;
2564
2565 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2566 hpp_sort_dimensions[i].taken = 0;
2567
2568 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2569 bstack_sort_dimensions[i].taken = 0;
2570
2571 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2572 memory_sort_dimensions[i].taken = 0;
2573}
2574
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002575bool is_strict_order(const char *order)
2576{
2577 return order && (*order != '+');
2578}
2579
Namhyung Kima7d945b2014-03-04 10:46:34 +09002580static int __setup_output_field(void)
2581{
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002582 char *str, *strp;
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002583 int ret = -EINVAL;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002584
2585 if (field_order == NULL)
2586 return 0;
2587
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002588 strp = str = strdup(field_order);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002589 if (str == NULL) {
2590 error("Not enough memory to setup output fields");
2591 return -ENOMEM;
2592 }
2593
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002594 if (!is_strict_order(field_order))
2595 strp++;
2596
2597 if (!strlen(strp)) {
2598 error("Invalid --fields key: `+'");
2599 goto out;
2600 }
2601
Jiri Olsa6d3375e2016-01-18 10:24:11 +01002602 ret = setup_output_list(strp);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002603
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002604out:
Namhyung Kima7d945b2014-03-04 10:46:34 +09002605 free(str);
2606 return ret;
2607}
2608
Namhyung Kim40184c42015-12-23 02:07:01 +09002609int setup_sorting(struct perf_evlist *evlist)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002610{
2611 int err;
2612
Namhyung Kim40184c42015-12-23 02:07:01 +09002613 err = __setup_sorting(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002614 if (err < 0)
2615 return err;
2616
2617 if (parent_pattern != default_parent_pattern) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002618 err = sort_dimension__add("parent", evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002619 if (err < 0)
2620 return err;
2621 }
2622
2623 reset_dimensions();
2624
2625 /*
2626 * perf diff doesn't use default hpp output fields.
2627 */
2628 if (sort__mode != SORT_MODE__DIFF)
2629 perf_hpp__init();
2630
2631 err = __setup_output_field();
2632 if (err < 0)
2633 return err;
2634
2635 /* copy sort keys to output fields */
2636 perf_hpp__setup_output_field();
2637 /* and then copy output fields to sort keys */
2638 perf_hpp__append_sort_keys();
2639
2640 return 0;
2641}
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002642
2643void reset_output_field(void)
2644{
2645 sort__need_collapse = 0;
2646 sort__has_parent = 0;
2647 sort__has_sym = 0;
2648 sort__has_dso = 0;
2649
Namhyung Kimd69b2962014-05-23 10:59:01 +09002650 field_order = NULL;
2651 sort_order = NULL;
2652
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002653 reset_dimensions();
2654 perf_hpp__reset_output_field();
2655}