#ifndef __PERF_HIST_H
#define __PERF_HIST_H

#include <linux/types.h>
#include <pthread.h>
#include "callchain.h"

extern struct callchain_param callchain_param;

struct hist_entry;
struct addr_location;
struct symbol;

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency doesn't give the
 * total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
	u64 total_period;
	u64 total_lost;
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
};
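
/*
 * Rough sketch, not lifted from the perf sources, of how the fields above
 * are meant to be filled while switching on the event header type of the
 * incoming stream (the event/sample variables are illustrative):
 *
 *	case PERF_RECORD_LOST:
 *		stats->nr_events[PERF_RECORD_LOST]++;
 *		stats->total_lost += event->lost.lost;
 *		break;
 *	case PERF_RECORD_SAMPLE:
 *		stats->nr_events[PERF_RECORD_SAMPLE]++;
 *		stats->total_period += sample->period;
 *		break;
 */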

enum hist_column {
	HISTC_SYMBOL,
	HISTC_DSO,
	HISTC_THREAD,
	HISTC_COMM,
	HISTC_PARENT,
	HISTC_CPU,
	HISTC_MISPREDICT,
	HISTC_SYMBOL_FROM,
	HISTC_SYMBOL_TO,
	HISTC_DSO_FROM,
	HISTC_DSO_TO,
	HISTC_SRCLINE,
	HISTC_NR_COLS, /* Last entry */
};

struct thread;
struct dso;

struct hists {
	struct rb_root entries_in_array[2];
	struct rb_root *entries_in;
	struct rb_root entries;
	struct rb_root entries_collapsed;
	u64 nr_entries;
	const struct thread *thread_filter;
	const struct dso *dso_filter;
	const char *uid_filter_str;
	const char *symbol_filter_str;
	pthread_mutex_t lock;
	struct events_stats stats;
	u64 event_stream;
	u16 col_len[HISTC_NR_COLS];
};
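
/*
 * A note on the rb_root members above (a rough description, see hist.c for
 * the details): new entries are inserted into *entries_in, which points at
 * one of the two entries_in_array trees (double buffered so a display thread
 * can consume one tree while collection keeps filling the other),
 * hists__collapse_resort() merges equivalent entries into entries_collapsed,
 * and hists__output_resort() builds the final, display-ordered entries tree.
 */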

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *parent, u64 period);
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size,
			      struct hists *hists);
void hist_entry__free(struct hist_entry *);
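
/*
 * Hedged usage sketch, not copied from any builtin: a tool resolves each
 * sample to a struct addr_location and then adds (or updates) the matching
 * histogram entry, e.g.
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(hists, &al, parent, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 */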

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period);

void hists__output_resort(struct hists *self);
void hists__output_resort_threaded(struct hists *hists);
void hists__collapse_resort(struct hists *self);
void hists__collapse_resort_threaded(struct hists *hists);

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
				   bool zap_kernel);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);

void hists__inc_nr_events(struct hists *self, u32 type);
size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);

size_t hists__fprintf(struct hists *self, bool show_header, int max_rows,
		      int max_cols, FILE *fp);
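
/*
 * Typical report-style ordering, sketched rather than lifted from a
 * particular tool: once all samples have been added,
 *
 *	hists__collapse_resort(hists);
 *	hists__output_resort(hists);
 *	hists__fprintf(hists, true, 0, 0, stdout);
 *
 * prints the sorted histogram; the _threaded variants serve top-like tools
 * that keep resorting while samples are still arriving.
 */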

int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
int hist_entry__annotate(struct hist_entry *self, size_t privsize);

void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
void hists__filter_by_symbol(struct hists *hists);
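
/*
 * These work together with the *_filter fields in struct hists: the caller
 * stores the filter there and then asks for the entries to be re-screened,
 * e.g. (illustrative only):
 *
 *	hists->dso_filter = dso;
 *	hists__filter_by_dso(hists);
 */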

u16 hists__col_len(struct hists *self, enum hist_column col);
void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
void hists__reset_col_len(struct hists *hists);
void hists__calc_col_len(struct hists *hists, struct hist_entry *he);

struct perf_hpp {
	char *buf;
	size_t size;
	const char *sep;
	void *ptr;
};

struct perf_hpp_fmt {
	bool cond;
	int (*header)(struct perf_hpp *hpp);
	int (*width)(struct perf_hpp *hpp);
	int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
	int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
};

extern struct perf_hpp_fmt perf_hpp__format[];
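
/*
 * A minimal sketch of what one perf_hpp_fmt column could look like; the
 * hpp_header_foo/hpp_width_foo names are hypothetical, the real column
 * implementations live elsewhere in the perf UI code. header() prints the
 * column title into hpp->buf, width() reports the column width, and
 * entry()/color() format the per-hist_entry value the same way; cond gates
 * whether the column is shown at all:
 *
 *	static int hpp_header_foo(struct perf_hpp *hpp)
 *	{
 *		return scnprintf(hpp->buf, hpp->size, "%8s", "Foo");
 *	}
 *
 *	static int hpp_width_foo(struct perf_hpp *hpp __maybe_unused)
 *	{
 *		return 8;
 *	}
 */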

enum {
	PERF_HPP__BASELINE,
	PERF_HPP__OVERHEAD,
	PERF_HPP__OVERHEAD_SYS,
	PERF_HPP__OVERHEAD_US,
	PERF_HPP__OVERHEAD_GUEST_SYS,
	PERF_HPP__OVERHEAD_GUEST_US,
	PERF_HPP__SAMPLES,
	PERF_HPP__PERIOD,
	PERF_HPP__DELTA,
	PERF_HPP__RATIO,
	PERF_HPP__WEIGHTED_DIFF,
	PERF_HPP__DISPL,

	PERF_HPP__MAX_INDEX
};

void perf_hpp__init(void);
void perf_hpp__column_enable(unsigned col, bool enable);
int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
				bool color);
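
/*
 * Set-up is typically (a sketch, assuming a diff-like tool that wants the
 * extra delta column): perf_hpp__init() installs the default columns and
 * perf_hpp__column_enable() toggles the optional ones, e.g.
 *
 *	perf_hpp__init();
 *	perf_hpp__column_enable(PERF_HPP__DELTA, true);
 */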

struct perf_evlist;

#ifdef NEWT_SUPPORT
#include "../ui/keysyms.h"
int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
			     void(*timer)(void *arg), void *arg, int delay_secs);

int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
				  void(*timer)(void *arg), void *arg,
				  int refresh);
#else
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
				  const char *help __maybe_unused,
				  void(*timer)(void *arg) __maybe_unused,
				  void *arg __maybe_unused,
				  int refresh __maybe_unused)
{
	return 0;
}

static inline int hist_entry__tui_annotate(struct hist_entry *self __maybe_unused,
					   int evidx __maybe_unused,
					   void(*timer)(void *arg) __maybe_unused,
					   void *arg __maybe_unused,
					   int delay_secs __maybe_unused)
{
	return 0;
}
#define K_LEFT -1
#define K_RIGHT -2
#endif

#ifdef GTK2_SUPPORT
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
				  void(*timer)(void *arg), void *arg,
				  int refresh);
#else
static inline
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
				  const char *help __maybe_unused,
				  void(*timer)(void *arg) __maybe_unused,
				  void *arg __maybe_unused,
				  int refresh __maybe_unused)
{
	return 0;
}
#endif

unsigned int hists__sort_list_width(struct hists *self);

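/*
 * Helpers for 'perf diff' column computations; roughly, _delta is the
 * difference between a pair of entries' period percentages, _ratio the ratio
 * of their raw periods, and _wdiff a weighted difference of the raw periods.
 * See the perf diff implementation for the exact definitions.
 */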
double perf_diff__compute_delta(struct hist_entry *he);
double perf_diff__compute_ratio(struct hist_entry *he);
s64 perf_diff__compute_wdiff(struct hist_entry *he);
#endif /* __PERF_HIST_H */