#ifndef __PERF_SORT_H
#define __PERF_SORT_H
#include "../builtin.h"

#include "util.h"

#include "color.h"
#include <linux/list.h>
#include "cache.h"
#include <linux/rbtree.h>
#include "symbol.h"
#include "string.h"
#include "callchain.h"
#include "strlist.h"
#include "values.h"

#include "../perf.h"
#include "debug.h"
#include "header.h"

#include <subcmd/parse-options.h>
#include "parse-events.h"
#include "hist.h"
#include "thread.h"

extern regex_t parent_regex;
extern const char *sort_order;
extern const char *field_order;
extern const char default_parent_pattern[];
extern const char *parent_pattern;
extern const char default_sort_order[];
extern regex_t ignore_callees_regex;
extern int have_ignore_callees;
extern enum sort_mode sort__mode;
extern struct sort_entry sort_comm;
extern struct sort_entry sort_dso;
extern struct sort_entry sort_sym;
extern struct sort_entry sort_parent;
extern struct sort_entry sort_dso_from;
extern struct sort_entry sort_dso_to;
extern struct sort_entry sort_sym_from;
extern struct sort_entry sort_sym_to;
extern enum sort_type sort__first_dimension;
extern const char default_mem_sort_order[];

struct he_stat {
	u64 period;
	u64 period_sys;
	u64 period_us;
	u64 period_guest_sys;
	u64 period_guest_us;
	u64 weight;
	u32 nr_events;
};

struct hist_entry_diff {
	bool computed;
	union {
		/* PERF_HPP__DELTA */
		double period_ratio_delta;

		/* PERF_HPP__RATIO */
		double period_ratio;

		/* HISTC_WEIGHTED_DIFF */
		s64 wdiff;
	};
};

struct hist_entry_ops {
	void *(*new)(size_t size);
	void (*free)(void *ptr);
};
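
/*
 * Illustrative sketch only (not part of this header, all names below are
 * hypothetical): an ops table whose callbacks simply forward to the C
 * library allocator.  A caller that wants pooled or pre-zeroed entries
 * can plug in its own pair the same way.
 *
 *	static void *example_he_alloc(size_t size)
 *	{
 *		return calloc(1, size);
 *	}
 *
 *	static void example_he_free(void *ptr)
 *	{
 *		free(ptr);
 *	}
 *
 *	static struct hist_entry_ops example_hist_entry_ops = {
 *		.new	= example_he_alloc,
 *		.free	= example_he_free,
 *	};
 */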

/**
 * struct hist_entry - histogram entry
 *
 * @row_offset - offset from the first callchain expanded to appear on screen
 * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
 */
struct hist_entry {
	struct rb_node rb_node_in;
	struct rb_node rb_node;
	union {
		struct list_head node;
		struct list_head head;
	} pairs;
	struct he_stat stat;
	struct he_stat *stat_acc;
	struct map_symbol ms;
	struct thread *thread;
	struct comm *comm;
	u64 ip;
	u64 transaction;
	s32 socket;
	s32 cpu;
	u8 cpumode;
	u8 depth;

	/* We are added by hists__add_dummy_entry. */
	bool dummy;
	bool leaf;

	char level;
	u8 filtered;
	union {
		/*
		 * Since perf diff only supports stdio output, TUI
		 * fields are only accessed from perf report (or perf
		 * top).  So make it a union to reduce memory usage.
		 */
		struct hist_entry_diff diff;
		struct /* for TUI */ {
			u16 row_offset;
			u16 nr_rows;
			bool init_have_children;
			bool unfolded;
			bool has_children;
			bool has_no_entry;
		};
	};
Arnaldo Carvalho de Melo409a8be2012-05-30 10:33:24 -0300122 char *srcline;
Andi Kleen31191a82015-08-07 15:54:24 -0700123 char *srcfile;
Arnaldo Carvalho de Melo83753192010-04-03 16:30:44 -0300124 struct symbol *parent;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +0100125 struct branch_info *branch_info;
Jiri Olsaae359f12012-10-04 21:49:35 +0900126 struct hists *hists;
Stephane Eranian98a3b322013-01-24 16:10:35 +0100127 struct mem_info *mem_info;
Namhyung Kim72392832015-12-24 11:16:17 +0900128 void *raw_data;
129 u32 raw_size;
Namhyung Kim60517d22015-12-23 02:07:03 +0900130 void *trace_output;
Namhyung Kim1b2dbbf2016-03-07 16:44:46 -0300131 struct perf_hpp_list *hpp_list;
Namhyung Kimaef810e2016-02-25 00:13:34 +0900132 struct hist_entry *parent_he;
Jiri Olsaf542e762016-07-05 08:56:04 +0200133 struct hist_entry_ops *ops;
Namhyung Kimaef810e2016-02-25 00:13:34 +0900134 union {
135 /* this is for hierarchical entry structure */
136 struct {
137 struct rb_root hroot_in;
138 struct rb_root hroot_out;
139 }; /* non-leaf entries */
140 struct rb_root sorted_chain; /* leaf entry has callchains */
141 };
Stephane Eranian98a3b322013-01-24 16:10:35 +0100142 struct callchain_root callchain[0]; /* must be last member */
John Kacurdd68ada2009-09-24 18:02:49 +0200143};
144
static inline bool hist_entry__has_pairs(struct hist_entry *he)
{
	return !list_empty(&he->pairs.node);
}

static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
{
	if (hist_entry__has_pairs(he))
		return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
	return NULL;
}

static inline void hist_entry__add_pair(struct hist_entry *pair,
					struct hist_entry *he)
{
	list_add_tail(&pair->pairs.node, &he->pairs.head);
}
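
/*
 * Illustrative sketch only (not part of this header, variable names are
 * hypothetical): linking a baseline entry with a counterpart and walking
 * back to it, the way a diff-style comparison might.
 *
 *	hist_entry__add_pair(other_he, base_he);
 *	if (hist_entry__has_pairs(base_he)) {
 *		struct hist_entry *pair = hist_entry__next_pair(base_he);
 *		... compare base_he->stat with pair->stat ...
 *	}
 */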

static inline float hist_entry__get_percent_limit(struct hist_entry *he)
{
	u64 period = he->stat.period;
	u64 total_period = hists__total_period(he->hists);

	if (unlikely(total_period == 0))
		return 0;

	if (symbol_conf.cumulate_callchain)
		period = he->stat_acc->period;

	return period * 100.0 / total_period;
}
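
/*
 * Illustrative sketch only (not part of this header): the helper above
 * returns the entry's share of the total period as a percentage, using
 * the accumulated period when callchain cumulation is enabled.  A caller
 * could filter small entries against a chosen threshold (min_percent is
 * hypothetical):
 *
 *	if (hist_entry__get_percent_limit(he) < min_percent)
 *		... skip or fold the entry ...
 */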

static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

static inline u64 cl_offset(u64 address)
{
	/* return the offset inside the cacheline */
	return (address & (cacheline_size - 1));
}
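
/*
 * Illustrative example only (not part of this header): with a 64-byte
 * cacheline (cacheline_size == 64), an address splits as
 *
 *	cl_address(0x1234) == 0x1200	(0x1234 & ~0x3f)
 *	cl_offset(0x1234)  == 0x34	(0x1234 &  0x3f)
 *
 * so cl_address(addr) + cl_offset(addr) == addr for any address.
 */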

enum sort_mode {
	SORT_MODE__NORMAL,
	SORT_MODE__BRANCH,
	SORT_MODE__MEMORY,
	SORT_MODE__TOP,
	SORT_MODE__DIFF,
	SORT_MODE__TRACEPOINT,
};

enum sort_type {
	/* common sort keys */
	SORT_PID,
	SORT_COMM,
	SORT_DSO,
	SORT_SYM,
	SORT_PARENT,
	SORT_CPU,
	SORT_SOCKET,
	SORT_SRCLINE,
	SORT_SRCFILE,
	SORT_LOCAL_WEIGHT,
	SORT_GLOBAL_WEIGHT,
	SORT_TRANSACTION,
	SORT_TRACE,

	/* branch stack specific sort keys */
	__SORT_BRANCH_STACK,
	SORT_DSO_FROM = __SORT_BRANCH_STACK,
	SORT_DSO_TO,
	SORT_SYM_FROM,
	SORT_SYM_TO,
	SORT_MISPREDICT,
	SORT_ABORT,
	SORT_IN_TX,
	SORT_CYCLES,
	SORT_SRCLINE_FROM,
	SORT_SRCLINE_TO,

	/* memory mode specific sort keys */
	__SORT_MEMORY_MODE,
	SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE,
	SORT_MEM_DADDR_DSO,
	SORT_MEM_LOCKED,
	SORT_MEM_TLB,
	SORT_MEM_LVL,
	SORT_MEM_SNOOP,
	SORT_MEM_DCACHELINE,
	SORT_MEM_IADDR_SYMBOL,
};

/*
 * configurable sorting bits
 */

struct sort_entry {
	const char *se_header;

	int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
	int64_t (*se_sort)(struct hist_entry *, struct hist_entry *);
	int (*se_snprintf)(struct hist_entry *he, char *bf, size_t size,
			   unsigned int width);
	int (*se_filter)(struct hist_entry *he, int type, const void *arg);
	u8 se_width_idx;
};
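
/*
 * Illustrative sketch only (not part of this header, all names are
 * hypothetical): a sort key is described by filling in the callbacks
 * above, roughly like this.  se_cmp orders entries as they are
 * collected, se_snprintf renders the column, and se_width_idx picks
 * the column-width slot (a HISTC_* value from hist.h).
 *
 *	struct sort_entry example_sort_key = {
 *		.se_header	= "Example",
 *		.se_cmp		= example_cmp,
 *		.se_snprintf	= example_snprintf,
 *		.se_width_idx	= 0,
 *	};
 */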

extern struct sort_entry sort_thread;
extern struct list_head hist_entry__sort_list;

struct perf_evlist;
struct pevent;
int setup_sorting(struct perf_evlist *evlist);
int setup_output_field(void);
void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);

int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);

bool is_strict_order(const char *order);

int hpp_dimension__add_output(unsigned col);
#endif /* __PERF_SORT_H */