#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param callchain_param = {
	.mode		= CHAIN_GRAPH_REL,
	.min_percent	= 0.5,
	.order		= ORDER_CALLEE,
	.key		= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

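/*
 * Grow the output column widths (symbol, comm, dso, parent, branch and
 * mem columns) so that this entry's fields fit when the hists are printed.
 */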
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period += period;
	he_stat->weight += weight;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
	dest->weight += src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

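/*
 * Age the entries (used for live updating, perf top style): each entry's
 * period is scaled by 7/8, and entries that match the zap flags or decay
 * to zero are erased, unless the browser still has them in use.
 */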
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

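/*
 * Allocate a new hist_entry from a template; when callchains are in use,
 * the callchain root lives at the end of the same allocation.
 */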
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries. So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

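/*
 * Insert the entry into the current entries_in rbtree or, if an entry that
 * compares equal is already there, just fold this period/weight into it.
 */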
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period,
					 u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight	= weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight	= weight,
		},
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

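/*
 * Typical use from a tool's sample processing callback (illustrative
 * sketch only, actual call sites vary across the perf tools):
 *
 *	he = __hists__add_entry(&evsel->hists, al, parent,
 *				sample->period, sample->weight);
 *	if (he == NULL)
 *		return -ENOMEM;
 */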
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight	= weight,
		},
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

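/*
 * Try to insert @he into @root using the collapse comparison; if an equal
 * entry already exists, fold @he's stats and callchain into it and free
 * @he, returning false.  Returns true when @he was actually inserted.
 */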
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

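/*
 * Swap the two entries_in rbtrees under the hists lock, so new samples keep
 * going into one tree while the other is being collapsed and resorted.
 */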
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

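/*
 * Merge all entries from the current input tree into entries_collapsed,
 * combining the ones that are equal according to the collapse sort keys.
 */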
void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

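/*
 * Order two entries by period; with event groups, fall back to comparing
 * the periods of the other group members, one by one.
 */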
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	/* one u64 slot per group member */
	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

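/*
 * Build the final output rbtree (hists->entries), sorted on period, from
 * the collapsed (or input) tree, recomputing column widths and totals.
 */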
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

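/*
 * Clear @filter from the entry and, if no other filter still applies, add
 * it back into the entry count, totals and column width bookkeeping.
 */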
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

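/*
 * Add an empty (zeroed stats) entry to @hists mirroring @pair, so that
 * hists__link() has something in the leader to attach the pair to.
 */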
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}