#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"
#include "evlist.h"
#include <traceevent/event-parse.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
int		sort__has_socket = 0;
int		sort__has_thread = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

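/*
 * Like vsnprintf(), but when a --field-separator is in use, any occurrence
 * of the separator character in the formatted text is replaced with '.' so
 * that column boundaries in the output stay unambiguous.
 */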
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings, which are interned and unique per comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings, which are interned and unique per comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

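/*
 * Order two symbols by start address, then by end address, with NULL
 * symbols handled via cmp_null().
 */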
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

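/*
 * Format one symbol column entry: in verbose mode the raw address and the
 * symtab origin character come first, then the privilege level in brackets,
 * then the symbol name (with a +0x offset for variable maps), or the raw
 * address when no symbol was resolved.
 */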
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline) {
		if (!left->ms.map)
			left->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = left->ms.map;
			left->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, left->ip),
						    left->ms.sym, true);
		}
	}
	if (!right->srcline) {
		if (!right->ms.map)
			right->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = right->ms.map;
			right->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, right->ip),
						     right->ms.sym, true);
		}
	}
	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile) {
		if (!left->ms.map)
			left->srcfile = no_srcfile;
		else
			left->srcfile = get_srcfile(left);
	}
	if (!right->srcfile) {
		if (!right->ms.map)
			right->srcfile = no_srcfile;
		else
			right->srcfile = get_srcfile(right);
	}
	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header	= "CPU",
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_width_idx	= HISTC_SOCKET,
};

/* --sort trace */

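/*
 * Build the formatted tracepoint output for an entry via libtraceevent:
 * either the raw field dump (--raw-trace) or the pretty-printed event, and
 * return the trace_seq buffer holding it.
 */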
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	return seq.buffer;
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	hists__new_col_len(left->hists, HISTC_TRACE, strlen(left->trace_output));
	hists__new_col_len(right->hists, HISTC_TRACE, strlen(right->trace_output));

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-*.*s", width, width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_width_idx	= HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_width_idx	= HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width){
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
};

/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

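/*
 * Group data-cacheline entries by cpumode and backing map (maj/min/inode),
 * then by pid for anonymous userspace mappings, and finally by the
 * cacheline of the resolved data address.
 */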
static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

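/*
 * Width needed for the Transaction column: the flag names not marked
 * skip_for_len, plus ":XX " for the abort code.
 */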
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

Namhyung Kima7d945b2014-03-04 10:46:34 +09001543static struct hpp_sort_entry *
1544__sort_dimension__alloc_hpp(struct sort_dimension *sd)
Namhyung Kim8b536992014-03-03 11:46:55 +09001545{
1546 struct hpp_sort_entry *hse;
1547
1548 hse = malloc(sizeof(*hse));
1549 if (hse == NULL) {
1550 pr_err("Memory allocation failed\n");
Namhyung Kima7d945b2014-03-04 10:46:34 +09001551 return NULL;
Namhyung Kim8b536992014-03-03 11:46:55 +09001552 }
1553
1554 hse->se = sd->entry;
Namhyung Kim1ecd4452014-07-31 14:47:40 +09001555 hse->hpp.name = sd->entry->se_header;
Namhyung Kim8b536992014-03-03 11:46:55 +09001556 hse->hpp.header = __sort__hpp_header;
1557 hse->hpp.width = __sort__hpp_width;
1558 hse->hpp.entry = __sort__hpp_entry;
1559 hse->hpp.color = NULL;
1560
Namhyung Kim87bbdf72015-01-08 09:45:46 +09001561 hse->hpp.cmp = __sort__hpp_cmp;
1562 hse->hpp.collapse = __sort__hpp_collapse;
1563 hse->hpp.sort = __sort__hpp_sort;
Namhyung Kim8b536992014-03-03 11:46:55 +09001564
1565 INIT_LIST_HEAD(&hse->hpp.list);
1566 INIT_LIST_HEAD(&hse->hpp.sort_list);
Jiri Olsaf2998422014-05-23 17:15:47 +02001567 hse->hpp.elide = false;
Namhyung Kime0d66c72014-07-31 14:47:37 +09001568 hse->hpp.len = 0;
Namhyung Kim5b591662014-07-31 14:47:38 +09001569 hse->hpp.user_len = 0;
Namhyung Kim8b536992014-03-03 11:46:55 +09001570
Namhyung Kima7d945b2014-03-04 10:46:34 +09001571 return hse;
1572}
1573
1574bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
1575{
1576 return format->header == __sort__hpp_header;
1577}
1578
1579static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
1580{
1581 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1582
1583 if (hse == NULL)
1584 return -1;
1585
Namhyung Kim8b536992014-03-03 11:46:55 +09001586 perf_hpp__register_sort_field(&hse->hpp);
1587 return 0;
1588}
1589
Namhyung Kima7d945b2014-03-04 10:46:34 +09001590static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
1591{
1592 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
1593
1594 if (hse == NULL)
1595 return -1;
1596
1597 perf_hpp__column_register(&hse->hpp);
1598 return 0;
1599}
1600
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001601struct hpp_dynamic_entry {
1602 struct perf_hpp_fmt hpp;
1603 struct perf_evsel *evsel;
1604 struct format_field *field;
1605 unsigned dynamic_len;
Namhyung Kim053a3982015-12-23 02:07:05 +09001606 bool raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001607};
1608
1609static int hde_width(struct hpp_dynamic_entry *hde)
1610{
1611 if (!hde->hpp.len) {
1612 int len = hde->dynamic_len;
1613 int namelen = strlen(hde->field->name);
1614 int fieldlen = hde->field->size;
1615
1616 if (namelen > len)
1617 len = namelen;
1618
1619 if (!(hde->field->flags & FIELD_IS_STRING)) {
 1620			/* width to print the value in hex: two digits per byte plus room for a "0x" prefix */
1621 fieldlen = hde->field->size * 2 + 2;
1622 }
1623 if (fieldlen > len)
1624 len = fieldlen;
1625
1626 hde->hpp.len = len;
1627 }
1628 return hde->hpp.len;
1629}
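
/*
 * Illustrative example (hypothetical field): for a non-string 8-byte
 * field named "addr", the value is printed as hex, so
 * fieldlen = 8 * 2 + 2 = 18 characters (two hex digits per byte plus
 * room for a "0x" prefix), which is larger than namelen = 4 and thus
 * becomes the column width.
 */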
1630
Namhyung Kim60517d22015-12-23 02:07:03 +09001631static void update_dynamic_len(struct hpp_dynamic_entry *hde,
1632 struct hist_entry *he)
1633{
1634 char *str, *pos;
1635 struct format_field *field = hde->field;
1636 size_t namelen;
1637 bool last = false;
1638
Namhyung Kim053a3982015-12-23 02:07:05 +09001639 if (hde->raw_trace)
1640 return;
1641
Namhyung Kim60517d22015-12-23 02:07:03 +09001642	/* parse the pretty-printed result and update the max field length */
1643 if (!he->trace_output)
1644 he->trace_output = get_trace_output(he);
1645
1646 namelen = strlen(field->name);
1647 str = he->trace_output;
1648
1649 while (str) {
1650 pos = strchr(str, ' ');
1651 if (pos == NULL) {
1652 last = true;
1653 pos = str + strlen(str);
1654 }
1655
1656 if (!strncmp(str, field->name, namelen)) {
1657 size_t len;
1658
1659 str += namelen + 1;
1660 len = pos - str;
1661
1662 if (len > hde->dynamic_len)
1663 hde->dynamic_len = len;
1664 break;
1665 }
1666
1667 if (last)
1668 str = NULL;
1669 else
1670 str = pos + 1;
1671 }
1672}
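
/*
 * Illustrative walk-through, assuming the pretty-printed trace_output
 * looks like "comm=bash pid=1234 prio=120": for the field "pid" the
 * loop above finds the token starting with "pid", skips past the name
 * and separator, and records strlen("1234") == 4 in dynamic_len if that
 * is larger than the current value.
 */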
1673
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001674static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1675 struct perf_evsel *evsel __maybe_unused)
1676{
1677 struct hpp_dynamic_entry *hde;
1678 size_t len = fmt->user_len;
1679
1680 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1681
1682 if (!len)
1683 len = hde_width(hde);
1684
1685 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
1686}
1687
1688static int __sort__hde_width(struct perf_hpp_fmt *fmt,
1689 struct perf_hpp *hpp __maybe_unused,
1690 struct perf_evsel *evsel __maybe_unused)
1691{
1692 struct hpp_dynamic_entry *hde;
1693 size_t len = fmt->user_len;
1694
1695 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1696
1697 if (!len)
1698 len = hde_width(hde);
1699
1700 return len;
1701}
1702
Namhyung Kim361459f2015-12-23 02:07:08 +09001703bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
1704{
1705 struct hpp_dynamic_entry *hde;
1706
1707 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1708
1709 return hists_to_evsel(hists) == hde->evsel;
1710}
1711
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001712static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
1713 struct hist_entry *he)
1714{
1715 struct hpp_dynamic_entry *hde;
1716 size_t len = fmt->user_len;
Namhyung Kim60517d22015-12-23 02:07:03 +09001717 char *str, *pos;
1718 struct format_field *field;
1719 size_t namelen;
1720 bool last = false;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001721 int ret;
1722
1723 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1724
1725 if (!len)
1726 len = hde_width(hde);
1727
Namhyung Kim053a3982015-12-23 02:07:05 +09001728 if (hde->raw_trace)
1729 goto raw_field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001730
Namhyung Kim053a3982015-12-23 02:07:05 +09001731 field = hde->field;
Namhyung Kim60517d22015-12-23 02:07:03 +09001732 namelen = strlen(field->name);
1733 str = he->trace_output;
1734
1735 while (str) {
1736 pos = strchr(str, ' ');
1737 if (pos == NULL) {
1738 last = true;
1739 pos = str + strlen(str);
1740 }
1741
1742 if (!strncmp(str, field->name, namelen)) {
1743 str += namelen + 1;
1744 str = strndup(str, pos - str);
1745
1746 if (str == NULL)
1747 return scnprintf(hpp->buf, hpp->size,
1748 "%*.*s", len, len, "ERROR");
1749 break;
1750 }
1751
1752 if (last)
1753 str = NULL;
1754 else
1755 str = pos + 1;
1756 }
1757
1758 if (str == NULL) {
1759 struct trace_seq seq;
Namhyung Kim053a3982015-12-23 02:07:05 +09001760raw_field:
Namhyung Kim60517d22015-12-23 02:07:03 +09001761 trace_seq_init(&seq);
1762 pevent_print_field(&seq, he->raw_data, hde->field);
1763 str = seq.buffer;
1764 }
1765
1766 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
1767 free(str);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001768 return ret;
1769}
1770
1771static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
1772 struct hist_entry *a, struct hist_entry *b)
1773{
1774 struct hpp_dynamic_entry *hde;
1775 struct format_field *field;
1776 unsigned offset, size;
1777
1778 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
1779
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001780 field = hde->field;
1781 if (field->flags & FIELD_IS_DYNAMIC) {
1782 unsigned long long dyn;
1783
1784 pevent_read_number_field(field, a->raw_data, &dyn);
1785 offset = dyn & 0xffff;
1786 size = (dyn >> 16) & 0xffff;
1787
1788 /* record max width for output */
1789 if (size > hde->dynamic_len)
1790 hde->dynamic_len = size;
1791 } else {
1792 offset = field->offset;
1793 size = field->size;
Namhyung Kim60517d22015-12-23 02:07:03 +09001794
1795 update_dynamic_len(hde, a);
1796 update_dynamic_len(hde, b);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001797 }
1798
1799 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
1800}
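
/*
 * Worked example (hypothetical value) for the dynamic-field descriptor
 * handled above: a 32-bit value of 0x00080010 read from the sample means
 *   offset = 0x00080010 & 0xffff         = 0x10 (payload starts at byte 16)
 *   size   = (0x00080010 >> 16) & 0xffff = 0x08 (payload is 8 bytes long)
 */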
1801
Namhyung Kim361459f2015-12-23 02:07:08 +09001802bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
1803{
1804 return fmt->cmp == __sort__hde_cmp;
1805}
1806
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001807static struct hpp_dynamic_entry *
1808__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
1809{
1810 struct hpp_dynamic_entry *hde;
1811
1812 hde = malloc(sizeof(*hde));
1813 if (hde == NULL) {
1814 pr_debug("Memory allocation failed\n");
1815 return NULL;
1816 }
1817
1818 hde->evsel = evsel;
1819 hde->field = field;
1820 hde->dynamic_len = 0;
1821
1822 hde->hpp.name = field->name;
1823 hde->hpp.header = __sort__hde_header;
1824 hde->hpp.width = __sort__hde_width;
1825 hde->hpp.entry = __sort__hde_entry;
1826 hde->hpp.color = NULL;
1827
1828 hde->hpp.cmp = __sort__hde_cmp;
1829 hde->hpp.collapse = __sort__hde_cmp;
1830 hde->hpp.sort = __sort__hde_cmp;
1831
1832 INIT_LIST_HEAD(&hde->hpp.list);
1833 INIT_LIST_HEAD(&hde->hpp.sort_list);
1834 hde->hpp.elide = false;
1835 hde->hpp.len = 0;
1836 hde->hpp.user_len = 0;
1837
1838 return hde;
1839}
1840
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001841static int parse_field_name(char *str, char **event, char **field, char **opt)
1842{
1843 char *event_name, *field_name, *opt_name;
1844
1845 event_name = str;
1846 field_name = strchr(str, '.');
1847
1848 if (field_name) {
1849 *field_name++ = '\0';
1850 } else {
1851 event_name = NULL;
1852 field_name = str;
1853 }
1854
1855 opt_name = strchr(field_name, '/');
1856 if (opt_name)
1857 *opt_name++ = '\0';
1858
1859 *event = event_name;
1860 *field = field_name;
1861 *opt = opt_name;
1862
1863 return 0;
1864}
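
/*
 * Illustrative splits performed by parse_field_name():
 *   "sched:sched_switch.next_pid/raw" -> event="sched:sched_switch",
 *                                        field="next_pid", opt="raw"
 *   "next_pid"                        -> event=NULL,
 *                                        field="next_pid", opt=NULL
 */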
1865
 1866/* find the matching evsel using a given event name. The event name can be:
Namhyung Kim9735be22016-01-05 19:58:35 +09001867 * 1. '%' + event index (e.g. '%1' for first event)
1868 * 2. full event name (e.g. sched:sched_switch)
1869 * 3. partial event name (should not contain ':')
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001870 */
1871static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
1872{
1873 struct perf_evsel *evsel = NULL;
1874 struct perf_evsel *pos;
1875 bool full_name;
1876
1877 /* case 1 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001878 if (event_name[0] == '%') {
1879 int nr = strtol(event_name+1, NULL, 0);
1880
 1881		if (nr <= 0 || nr > evlist->nr_entries)
1882 return NULL;
1883
1884 evsel = perf_evlist__first(evlist);
1885 while (--nr > 0)
1886 evsel = perf_evsel__next(evsel);
1887
1888 return evsel;
1889 }
1890
1891 full_name = !!strchr(event_name, ':');
1892 evlist__for_each(evlist, pos) {
Namhyung Kim9735be22016-01-05 19:58:35 +09001893 /* case 2 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001894 if (full_name && !strcmp(pos->name, event_name))
1895 return pos;
Namhyung Kim9735be22016-01-05 19:58:35 +09001896 /* case 3 */
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001897 if (!full_name && strstr(pos->name, event_name)) {
1898 if (evsel) {
1899 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
1900 event_name, evsel->name, pos->name);
1901 return NULL;
1902 }
1903 evsel = pos;
1904 }
1905 }
1906
1907 return evsel;
1908}
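
/*
 * Example of the matching rules above (hypothetical session): with
 * "sched:sched_switch" and "sched:sched_wakeup" both opened, the partial
 * name "sched" is ambiguous and find_evsel() returns NULL, while "%2" or
 * the full "sched:sched_wakeup" selects the second event unambiguously.
 */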
1909
Namhyung Kim3b099bf52015-12-23 02:07:07 +09001910static int __dynamic_dimension__add(struct perf_evsel *evsel,
1911 struct format_field *field,
1912 bool raw_trace)
1913{
1914 struct hpp_dynamic_entry *hde;
1915
1916 hde = __alloc_dynamic_entry(evsel, field);
1917 if (hde == NULL)
1918 return -ENOMEM;
1919
1920 hde->raw_trace = raw_trace;
1921
1922 perf_hpp__register_sort_field(&hde->hpp);
1923 return 0;
1924}
1925
Namhyung Kim2e422fd2015-12-23 02:07:09 +09001926static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
1927{
1928 int ret;
1929 struct format_field *field;
1930
1931 field = evsel->tp_format->format.fields;
1932 while (field) {
1933 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1934 if (ret < 0)
1935 return ret;
1936
1937 field = field->next;
1938 }
1939 return 0;
1940}
1941
1942static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
1943{
1944 int ret;
1945 struct perf_evsel *evsel;
1946
1947 evlist__for_each(evlist, evsel) {
1948 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1949 continue;
1950
1951 ret = add_evsel_fields(evsel, raw_trace);
1952 if (ret < 0)
1953 return ret;
1954 }
1955 return 0;
1956}
1957
Namhyung Kim9735be22016-01-05 19:58:35 +09001958static int add_all_matching_fields(struct perf_evlist *evlist,
1959 char *field_name, bool raw_trace)
1960{
1961 int ret = -ESRCH;
1962 struct perf_evsel *evsel;
1963 struct format_field *field;
1964
1965 evlist__for_each(evlist, evsel) {
1966 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1967 continue;
1968
1969 field = pevent_find_any_field(evsel->tp_format, field_name);
1970 if (field == NULL)
1971 continue;
1972
1973 ret = __dynamic_dimension__add(evsel, field, raw_trace);
1974 if (ret < 0)
1975 break;
1976 }
1977 return ret;
1978}
1979
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001980static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
1981{
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001982 char *str, *event_name, *field_name, *opt_name;
1983 struct perf_evsel *evsel;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001984 struct format_field *field;
Namhyung Kim053a3982015-12-23 02:07:05 +09001985 bool raw_trace = symbol_conf.raw_trace;
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001986 int ret = 0;
1987
1988 if (evlist == NULL)
1989 return -ENOENT;
1990
1991 str = strdup(tok);
1992 if (str == NULL)
1993 return -ENOMEM;
1994
Namhyung Kim5d0cff92015-12-23 02:07:06 +09001995 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001996 ret = -EINVAL;
1997 goto out;
1998 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09001999
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002000 if (opt_name) {
2001 if (strcmp(opt_name, "raw")) {
2002 pr_debug("unsupported field option %s\n", opt_name);
Namhyung Kim053a3982015-12-23 02:07:05 +09002003 ret = -EINVAL;
2004 goto out;
2005 }
2006 raw_trace = true;
2007 }
2008
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002009 if (!strcmp(field_name, "trace_fields")) {
2010 ret = add_all_dynamic_fields(evlist, raw_trace);
2011 goto out;
2012 }
2013
Namhyung Kim9735be22016-01-05 19:58:35 +09002014 if (event_name == NULL) {
2015 ret = add_all_matching_fields(evlist, field_name, raw_trace);
2016 goto out;
2017 }
2018
Namhyung Kim5d0cff92015-12-23 02:07:06 +09002019 evsel = find_evsel(evlist, event_name);
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002020 if (evsel == NULL) {
2021 pr_debug("Cannot find event: %s\n", event_name);
2022 ret = -ENOENT;
2023 goto out;
2024 }
2025
2026 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2027 pr_debug("%s is not a tracepoint event\n", event_name);
2028 ret = -EINVAL;
2029 goto out;
2030 }
2031
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002032 if (!strcmp(field_name, "*")) {
Namhyung Kim2e422fd2015-12-23 02:07:09 +09002033 ret = add_evsel_fields(evsel, raw_trace);
Namhyung Kim3b099bf52015-12-23 02:07:07 +09002034 } else {
2035 field = pevent_find_any_field(evsel->tp_format, field_name);
2036 if (field == NULL) {
2037 pr_debug("Cannot find event field for %s.%s\n",
2038 event_name, field_name);
 2039			ret = -ENOENT;
			goto out;
2040 }
2041
2042 ret = __dynamic_dimension__add(evsel, field, raw_trace);
2043 }
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002044
2045out:
2046 free(str);
2047 return ret;
2048}
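
/*
 * Summary of the dynamic sort tokens handled above, in the form parsed
 * by parse_field_name():
 *   "trace_fields"       - every field of every tracepoint event
 *   "<field>"            - the field from every event that has it
 *   "<event>.*"          - every field of the named event
 *   "<event>.<field>"    - a single field of a single event
 * Appending "/raw" to a token keeps the raw, un-pretty-printed value.
 */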
2049
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002050static int __sort_dimension__add(struct sort_dimension *sd)
Namhyung Kim2f532d02013-04-03 21:26:10 +09002051{
2052 if (sd->taken)
Namhyung Kim8b536992014-03-03 11:46:55 +09002053 return 0;
2054
Namhyung Kima7d945b2014-03-04 10:46:34 +09002055 if (__sort_dimension__add_hpp_sort(sd) < 0)
Namhyung Kim8b536992014-03-03 11:46:55 +09002056 return -1;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002057
2058 if (sd->entry->se_collapse)
2059 sort__need_collapse = 1;
2060
Namhyung Kim2f532d02013-04-03 21:26:10 +09002061 sd->taken = 1;
Namhyung Kim8b536992014-03-03 11:46:55 +09002062
2063 return 0;
Namhyung Kim2f532d02013-04-03 21:26:10 +09002064}
2065
Namhyung Kima2ce0672014-03-04 09:06:42 +09002066static int __hpp_dimension__add(struct hpp_dimension *hd)
2067{
2068 if (!hd->taken) {
2069 hd->taken = 1;
2070
2071 perf_hpp__register_sort_field(hd->fmt);
2072 }
2073 return 0;
2074}
2075
Namhyung Kima7d945b2014-03-04 10:46:34 +09002076static int __sort_dimension__add_output(struct sort_dimension *sd)
2077{
2078 if (sd->taken)
2079 return 0;
2080
2081 if (__sort_dimension__add_hpp_output(sd) < 0)
2082 return -1;
2083
2084 sd->taken = 1;
2085 return 0;
2086}
2087
2088static int __hpp_dimension__add_output(struct hpp_dimension *hd)
2089{
2090 if (!hd->taken) {
2091 hd->taken = 1;
2092
2093 perf_hpp__column_register(hd->fmt);
2094 }
2095 return 0;
2096}
2097
Jiri Olsabeeaaeb2015-10-06 14:25:11 +02002098int hpp_dimension__add_output(unsigned col)
2099{
2100 BUG_ON(col >= PERF_HPP__MAX_INDEX);
2101 return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
2102}
2103
Namhyung Kim40184c42015-12-23 02:07:01 +09002104static int sort_dimension__add(const char *tok,
 2105			       struct perf_evlist *evlist)
John Kacurdd68ada2009-09-24 18:02:49 +02002106{
2107 unsigned int i;
2108
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002109 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2110 struct sort_dimension *sd = &common_sort_dimensions[i];
John Kacurdd68ada2009-09-24 18:02:49 +02002111
John Kacurdd68ada2009-09-24 18:02:49 +02002112 if (strncasecmp(tok, sd->name, strlen(tok)))
2113 continue;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002114
John Kacurdd68ada2009-09-24 18:02:49 +02002115 if (sd->entry == &sort_parent) {
2116 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
2117 if (ret) {
2118 char err[BUFSIZ];
2119
2120 regerror(ret, &parent_regex, err, sizeof(err));
Arnaldo Carvalho de Melo2aefa4f2010-04-02 12:30:57 -03002121 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
2122 return -EINVAL;
John Kacurdd68ada2009-09-24 18:02:49 +02002123 }
2124 sort__has_parent = 1;
Namhyung Kim930477b2013-04-05 10:26:36 +09002125 } else if (sd->entry == &sort_sym) {
Namhyung Kim1af556402012-09-14 17:35:27 +09002126 sort__has_sym = 1;
Kan Liang94ba4622015-02-09 05:39:44 +00002127 /*
 2128			 * perf diff displays the performance difference between
 2129			 * two or more perf.data files. Those files could come
 2130			 * from different binaries, so we should not compare
 2131			 * instruction addresses (ips) but rather the symbol names.
2132 */
2133 if (sort__mode == SORT_MODE__DIFF)
2134 sd->entry->se_collapse = sort__sym_sort;
2135
Namhyung Kim68f6d022013-12-18 14:21:10 +09002136 } else if (sd->entry == &sort_dso) {
2137 sort__has_dso = 1;
Kan Liang2e7ea3a2015-09-04 10:45:43 -04002138 } else if (sd->entry == &sort_socket) {
2139 sort__has_socket = 1;
Namhyung Kimcfd92da2016-01-21 19:13:24 -03002140 } else if (sd->entry == &sort_thread) {
2141 sort__has_thread = 1;
John Kacurdd68ada2009-09-24 18:02:49 +02002142 }
2143
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002144 return __sort_dimension__add(sd);
John Kacurdd68ada2009-09-24 18:02:49 +02002145 }
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002146
Namhyung Kima2ce0672014-03-04 09:06:42 +09002147 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2148 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2149
2150 if (strncasecmp(tok, hd->name, strlen(tok)))
2151 continue;
2152
2153 return __hpp_dimension__add(hd);
2154 }
2155
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002156 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2157 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2158
2159 if (strncasecmp(tok, sd->name, strlen(tok)))
2160 continue;
2161
Namhyung Kim55369fc2013-04-01 20:35:20 +09002162 if (sort__mode != SORT_MODE__BRANCH)
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002163 return -EINVAL;
2164
2165 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
2166 sort__has_sym = 1;
2167
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002168 __sort_dimension__add(sd);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002169 return 0;
2170 }
2171
Namhyung Kimafab87b2013-04-03 21:26:11 +09002172 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2173 struct sort_dimension *sd = &memory_sort_dimensions[i];
2174
2175 if (strncasecmp(tok, sd->name, strlen(tok)))
2176 continue;
2177
2178 if (sort__mode != SORT_MODE__MEMORY)
2179 return -EINVAL;
2180
2181 if (sd->entry == &sort_mem_daddr_sym)
2182 sort__has_sym = 1;
2183
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002184 __sort_dimension__add(sd);
Namhyung Kimafab87b2013-04-03 21:26:11 +09002185 return 0;
2186 }
2187
Namhyung Kimc7c2a5e2015-12-23 02:07:02 +09002188 if (!add_dynamic_entry(evlist, tok))
2189 return 0;
2190
John Kacurdd68ada2009-09-24 18:02:49 +02002191 return -ESRCH;
2192}
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002193
Namhyung Kimd49dade2015-12-23 02:07:10 +09002194static const char *get_default_sort_order(struct perf_evlist *evlist)
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002195{
2196 const char *default_sort_orders[] = {
2197 default_sort_order,
2198 default_branch_sort_order,
2199 default_mem_sort_order,
2200 default_top_sort_order,
2201 default_diff_sort_order,
Namhyung Kimd49dade2015-12-23 02:07:10 +09002202 default_tracepoint_sort_order,
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002203 };
Namhyung Kimd49dade2015-12-23 02:07:10 +09002204 bool use_trace = true;
2205 struct perf_evsel *evsel;
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002206
2207 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
2208
Namhyung Kimd49dade2015-12-23 02:07:10 +09002209 if (evlist == NULL)
2210 goto out_no_evlist;
2211
2212 evlist__for_each(evlist, evsel) {
2213 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
2214 use_trace = false;
2215 break;
2216 }
2217 }
2218
2219 if (use_trace) {
2220 sort__mode = SORT_MODE__TRACEPOINT;
2221 if (symbol_conf.raw_trace)
2222 return "trace_fields";
2223 }
2224out_no_evlist:
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002225 return default_sort_orders[sort__mode];
2226}
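
/*
 * Example: when every event in the session is a tracepoint, sort__mode
 * is switched to SORT_MODE__TRACEPOINT and the tracepoint default order
 * is used; with raw trace output (symbol_conf.raw_trace) the default
 * becomes "trace_fields", i.e. one column per tracepoint field.
 */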
2227
Namhyung Kimd49dade2015-12-23 02:07:10 +09002228static int setup_sort_order(struct perf_evlist *evlist)
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002229{
2230 char *new_sort_order;
2231
2232 /*
2233 * Append '+'-prefixed sort order to the default sort
2234 * order string.
2235 */
2236 if (!sort_order || is_strict_order(sort_order))
2237 return 0;
2238
2239 if (sort_order[1] == '\0') {
2240 error("Invalid --sort key: `+'");
2241 return -EINVAL;
2242 }
2243
2244 /*
 2245	 * We allocate a new sort_order string, but we never free it,
 2246	 * because it is referenced throughout the rest of the code.
2247 */
2248 if (asprintf(&new_sort_order, "%s,%s",
Namhyung Kimd49dade2015-12-23 02:07:10 +09002249 get_default_sort_order(evlist), sort_order + 1) < 0) {
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002250 error("Not enough memory to set up --sort");
2251 return -ENOMEM;
2252 }
2253
2254 sort_order = new_sort_order;
2255 return 0;
2256}
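
/*
 * Example: with "-s +period" the '+' keeps the default keys, so
 * sort_order becomes "<default sort order>,period" instead of replacing
 * the defaults entirely.
 */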
2257
Jiri Olsab97511c2016-01-07 10:14:08 +01002258/*
 2259 * Prepends the 'pre,' prefix to 'str' if 'pre' is
 2260 * not already part of 'str'.
2261 */
2262static char *prefix_if_not_in(const char *pre, char *str)
2263{
2264 char *n;
2265
2266 if (!str || strstr(str, pre))
2267 return str;
2268
2269 if (asprintf(&n, "%s,%s", pre, str) < 0)
2270 return NULL;
2271
2272 free(str);
2273 return n;
2274}
2275
2276static char *setup_overhead(char *keys)
2277{
2278 keys = prefix_if_not_in("overhead", keys);
2279
2280 if (symbol_conf.cumulate_callchain)
2281 keys = prefix_if_not_in("overhead_children", keys);
2282
2283 return keys;
2284}
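
/*
 * Example: keys "comm,dso" become "overhead,comm,dso", and with
 * children mode (symbol_conf.cumulate_callchain) they become
 * "overhead_children,overhead,comm,dso", so the overhead columns lead
 * unless the user already listed them.
 */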
2285
Namhyung Kim40184c42015-12-23 02:07:01 +09002286static int __setup_sorting(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002287{
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002288 char *tmp, *tok, *str;
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002289 const char *sort_keys;
Namhyung Kim55309982013-02-06 14:57:16 +09002290 int ret = 0;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002291
Namhyung Kimd49dade2015-12-23 02:07:10 +09002292 ret = setup_sort_order(evlist);
Jiri Olsa1a1c0ff2014-08-23 14:59:48 +02002293 if (ret)
2294 return ret;
2295
2296 sort_keys = sort_order;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002297 if (sort_keys == NULL) {
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002298 if (is_strict_order(field_order)) {
Namhyung Kima7d945b2014-03-04 10:46:34 +09002299 /*
2300 * If user specified field order but no sort order,
2301 * we'll honor it and not add default sort orders.
2302 */
2303 return 0;
2304 }
2305
Namhyung Kimd49dade2015-12-23 02:07:10 +09002306 sort_keys = get_default_sort_order(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002307 }
Namhyung Kim512ae1b2014-03-18 11:31:39 +09002308
2309 str = strdup(sort_keys);
Namhyung Kim5936f542013-02-06 14:57:17 +09002310 if (str == NULL) {
2311 error("Not enough memory to setup sort keys");
2312 return -ENOMEM;
2313 }
2314
Jiri Olsab97511c2016-01-07 10:14:08 +01002315 /*
2316 * Prepend overhead fields for backward compatibility.
2317 */
2318 if (!is_strict_order(field_order)) {
2319 str = setup_overhead(str);
2320 if (str == NULL) {
2321 error("Not enough memory to setup overhead keys");
2322 return -ENOMEM;
2323 }
2324 }
2325
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002326 for (tok = strtok_r(str, ", ", &tmp);
2327 tok; tok = strtok_r(NULL, ", ", &tmp)) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002328 ret = sort_dimension__add(tok, evlist);
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002329 if (ret == -EINVAL) {
2330 error("Invalid --sort key: `%s'", tok);
Namhyung Kim55309982013-02-06 14:57:16 +09002331 break;
Namhyung Kimfc5871e2012-12-27 18:11:46 +09002332 } else if (ret == -ESRCH) {
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002333 error("Unknown --sort key: `%s'", tok);
Namhyung Kim55309982013-02-06 14:57:16 +09002334 break;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002335 }
2336 }
2337
2338 free(str);
Namhyung Kim55309982013-02-06 14:57:16 +09002339 return ret;
Arnaldo Carvalho de Meloc8829c72009-12-14 20:09:29 -02002340}
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002341
Jiri Olsaf2998422014-05-23 17:15:47 +02002342void perf_hpp__set_elide(int idx, bool elide)
Namhyung Kime67d49a2014-03-18 13:00:59 +09002343{
Jiri Olsaf2998422014-05-23 17:15:47 +02002344 struct perf_hpp_fmt *fmt;
2345 struct hpp_sort_entry *hse;
Namhyung Kime67d49a2014-03-18 13:00:59 +09002346
Jiri Olsaf2998422014-05-23 17:15:47 +02002347 perf_hpp__for_each_format(fmt) {
2348 if (!perf_hpp__is_sort_entry(fmt))
2349 continue;
2350
2351 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2352 if (hse->se->se_width_idx == idx) {
2353 fmt->elide = elide;
2354 break;
2355 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002356 }
Namhyung Kime67d49a2014-03-18 13:00:59 +09002357}
2358
Jiri Olsaf2998422014-05-23 17:15:47 +02002359static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002360{
2361 if (list && strlist__nr_entries(list) == 1) {
2362 if (fp != NULL)
2363 fprintf(fp, "# %s: %s\n", list_name,
2364 strlist__entry(list, 0)->s);
Jiri Olsaf2998422014-05-23 17:15:47 +02002365 return true;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002366 }
Jiri Olsaf2998422014-05-23 17:15:47 +02002367 return false;
2368}
2369
2370static bool get_elide(int idx, FILE *output)
2371{
2372 switch (idx) {
2373 case HISTC_SYMBOL:
2374 return __get_elide(symbol_conf.sym_list, "symbol", output);
2375 case HISTC_DSO:
2376 return __get_elide(symbol_conf.dso_list, "dso", output);
2377 case HISTC_COMM:
2378 return __get_elide(symbol_conf.comm_list, "comm", output);
2379 default:
2380 break;
2381 }
2382
2383 if (sort__mode != SORT_MODE__BRANCH)
2384 return false;
2385
2386 switch (idx) {
2387 case HISTC_SYMBOL_FROM:
2388 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
2389 case HISTC_SYMBOL_TO:
2390 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
2391 case HISTC_DSO_FROM:
2392 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
2393 case HISTC_DSO_TO:
2394 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
2395 default:
2396 break;
2397 }
2398
2399 return false;
Arnaldo Carvalho de Meloc351c282009-12-16 13:49:27 -02002400}
Namhyung Kim08e71542013-04-03 21:26:19 +09002401
2402void sort__setup_elide(FILE *output)
2403{
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002404 struct perf_hpp_fmt *fmt;
2405 struct hpp_sort_entry *hse;
Namhyung Kim7524f632013-11-08 17:53:42 +09002406
Jiri Olsaf2998422014-05-23 17:15:47 +02002407 perf_hpp__for_each_format(fmt) {
2408 if (!perf_hpp__is_sort_entry(fmt))
2409 continue;
Namhyung Kim08e71542013-04-03 21:26:19 +09002410
Jiri Olsaf2998422014-05-23 17:15:47 +02002411 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2412 fmt->elide = get_elide(hse->se->se_width_idx, output);
Namhyung Kim08e71542013-04-03 21:26:19 +09002413 }
2414
Namhyung Kim7524f632013-11-08 17:53:42 +09002415 /*
 2416	 * It makes no sense to elide all of the sort entries;
 2417	 * just revert them all so they show up again.
2418 */
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002419 perf_hpp__for_each_format(fmt) {
2420 if (!perf_hpp__is_sort_entry(fmt))
2421 continue;
2422
Jiri Olsaf2998422014-05-23 17:15:47 +02002423 if (!fmt->elide)
Namhyung Kim7524f632013-11-08 17:53:42 +09002424 return;
2425 }
2426
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002427 perf_hpp__for_each_format(fmt) {
2428 if (!perf_hpp__is_sort_entry(fmt))
2429 continue;
2430
Jiri Olsaf2998422014-05-23 17:15:47 +02002431 fmt->elide = false;
Namhyung Kimcfaa1542014-05-19 14:19:30 +09002432 }
Namhyung Kim08e71542013-04-03 21:26:19 +09002433}
Namhyung Kima7d945b2014-03-04 10:46:34 +09002434
2435static int output_field_add(char *tok)
2436{
2437 unsigned int i;
2438
2439 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
2440 struct sort_dimension *sd = &common_sort_dimensions[i];
2441
2442 if (strncasecmp(tok, sd->name, strlen(tok)))
2443 continue;
2444
2445 return __sort_dimension__add_output(sd);
2446 }
2447
2448 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
2449 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
2450
2451 if (strncasecmp(tok, hd->name, strlen(tok)))
2452 continue;
2453
2454 return __hpp_dimension__add_output(hd);
2455 }
2456
2457 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
2458 struct sort_dimension *sd = &bstack_sort_dimensions[i];
2459
2460 if (strncasecmp(tok, sd->name, strlen(tok)))
2461 continue;
2462
2463 return __sort_dimension__add_output(sd);
2464 }
2465
2466 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
2467 struct sort_dimension *sd = &memory_sort_dimensions[i];
2468
2469 if (strncasecmp(tok, sd->name, strlen(tok)))
2470 continue;
2471
2472 return __sort_dimension__add_output(sd);
2473 }
2474
2475 return -ESRCH;
2476}
2477
2478static void reset_dimensions(void)
2479{
2480 unsigned int i;
2481
2482 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
2483 common_sort_dimensions[i].taken = 0;
2484
2485 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
2486 hpp_sort_dimensions[i].taken = 0;
2487
2488 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
2489 bstack_sort_dimensions[i].taken = 0;
2490
2491 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
2492 memory_sort_dimensions[i].taken = 0;
2493}
2494
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002495bool is_strict_order(const char *order)
2496{
2497 return order && (*order != '+');
2498}
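
/*
 * Quick reference:
 *   is_strict_order("dso")  -> true  (replace the default keys)
 *   is_strict_order("+dso") -> false (append to the default keys)
 *   is_strict_order(NULL)   -> false
 */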
2499
Namhyung Kima7d945b2014-03-04 10:46:34 +09002500static int __setup_output_field(void)
2501{
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002502 char *tmp, *tok, *str, *strp;
2503 int ret = -EINVAL;
Namhyung Kima7d945b2014-03-04 10:46:34 +09002504
2505 if (field_order == NULL)
2506 return 0;
2507
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002508 strp = str = strdup(field_order);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002509 if (str == NULL) {
2510 error("Not enough memory to setup output fields");
2511 return -ENOMEM;
2512 }
2513
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002514 if (!is_strict_order(field_order))
2515 strp++;
2516
2517 if (!strlen(strp)) {
2518 error("Invalid --fields key: `+'");
2519 goto out;
2520 }
2521
2522 for (tok = strtok_r(strp, ", ", &tmp);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002523 tok; tok = strtok_r(NULL, ", ", &tmp)) {
2524 ret = output_field_add(tok);
2525 if (ret == -EINVAL) {
2526 error("Invalid --fields key: `%s'", tok);
2527 break;
2528 } else if (ret == -ESRCH) {
2529 error("Unknown --fields key: `%s'", tok);
2530 break;
2531 }
2532 }
2533
Jiri Olsa2f3f9bc2014-08-22 15:58:38 +02002534out:
Namhyung Kima7d945b2014-03-04 10:46:34 +09002535 free(str);
2536 return ret;
2537}
2538
Namhyung Kim40184c42015-12-23 02:07:01 +09002539int setup_sorting(struct perf_evlist *evlist)
Namhyung Kima7d945b2014-03-04 10:46:34 +09002540{
2541 int err;
2542
Namhyung Kim40184c42015-12-23 02:07:01 +09002543 err = __setup_sorting(evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002544 if (err < 0)
2545 return err;
2546
2547 if (parent_pattern != default_parent_pattern) {
Namhyung Kim40184c42015-12-23 02:07:01 +09002548 err = sort_dimension__add("parent", evlist);
Namhyung Kima7d945b2014-03-04 10:46:34 +09002549 if (err < 0)
2550 return err;
2551 }
2552
2553 reset_dimensions();
2554
2555 /*
2556 * perf diff doesn't use default hpp output fields.
2557 */
2558 if (sort__mode != SORT_MODE__DIFF)
2559 perf_hpp__init();
2560
2561 err = __setup_output_field();
2562 if (err < 0)
2563 return err;
2564
2565 /* copy sort keys to output fields */
2566 perf_hpp__setup_output_field();
2567 /* and then copy output fields to sort keys */
2568 perf_hpp__append_sort_keys();
2569
2570 return 0;
2571}
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002572
2573void reset_output_field(void)
2574{
2575 sort__need_collapse = 0;
2576 sort__has_parent = 0;
2577 sort__has_sym = 0;
2578 sort__has_dso = 0;
2579
Namhyung Kimd69b2962014-05-23 10:59:01 +09002580 field_order = NULL;
2581 sort_order = NULL;
2582
Namhyung Kim1c89fe92014-05-07 18:42:24 +09002583 reset_dimensions();
2584 perf_hpp__reset_output_field();
2585}