blob: b009bbf440d968ffea5eef026e6ce2645a628b20 [file] [log] [blame]
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +09001#include "perf.h"
2#include "tests.h"
3#include "debug.h"
4#include "symbol.h"
5#include "sort.h"
6#include "evsel.h"
7#include "evlist.h"
8#include "machine.h"
9#include "thread.h"
10#include "parse-events.h"
Namhyung Kim6e344a92014-04-25 12:28:13 +090011#include "hists_common.h"
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090012
/*
 * One fake sample fed into the hists.  pid/ip identify the sample up
 * front; thread/map/sym are filled in once the sample has been resolved
 * by add_hist_entries() so the validate_* helpers can recognize the
 * corresponding hist entries later.
 */
struct sample {
	u32 pid;		/* fake pid/tid, selects the fake thread */
	u64 ip;			/* fake address, selects fake map + symbol */
	struct thread *thread;	/* resolved by perf_event__preprocess_sample() */
	struct map *map;	/* resolved map */
	struct symbol *sym;	/* resolved symbol */
};
20
Namhyung Kim6e344a92014-04-25 12:28:13 +090021/* For the numbers, see hists_common.c */
/*
 * Samples present in BOTH evsels.  After hists__match() every entry
 * created from this table must have a matched pair (see
 * __validate_match()).
 */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf]   main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf]   cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash]   xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc]   malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
};
34
/*
 * Samples unique to each evsel (index 0 = leader, index 1 = other).
 * These entries get no matched pair; hists__link() gives the leader
 * dummy entries for the other's unique samples instead (see
 * __validate_link()).
 */
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = 100, .ip = 0x40000 + 800, },
		/* perf [libc]   malloc() */
		{ .pid = 100, .ip = 0x50000 + 700, },
		/* perf [kernel] page_fault() */
		{ .pid = 100, .ip = 0xf0000 + 800, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = 200, .ip = 0xf0000 + 900, },
		/* bash [libc]   free() */
		{ .pid = 300, .ip = 0x50000 + 800, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = 200, .ip = 0x50000 + 800, },
		/* bash [libc]   malloc() */
		{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
		/* bash [bash]   xfee() */
		{ .pid = 300, .ip = 0x40000 + 900, },
		/* bash [libc]   realloc() */
		{ .pid = 300, .ip = 0x50000 + 900, },
		/* bash [kernel] page_fault() */
		{ .pid = 300, .ip = 0xf0000 + 800, },
	},
};
61
62static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
63{
64 struct perf_evsel *evsel;
65 struct addr_location al;
66 struct hist_entry *he;
67 struct perf_sample sample = { .cpu = 0, };
68 size_t i = 0, k;
69
70 /*
71 * each evsel will have 10 samples - 5 common and 5 distinct.
72 * However the second evsel also has a collapsed entry for
73 * "bash [libc] malloc" so total 9 entries will be in the tree.
74 */
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -030075 evlist__for_each(evlist, evsel) {
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090076 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
77 const union perf_event event = {
Adrian Hunteref893252013-08-27 11:23:06 +030078 .header = {
79 .misc = PERF_RECORD_MISC_USER,
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090080 },
81 };
82
Adrian Hunteref893252013-08-27 11:23:06 +030083 sample.pid = fake_common_samples[k].pid;
Namhyung Kim13ce34d2014-05-12 09:56:42 +090084 sample.tid = fake_common_samples[k].pid;
Adrian Hunteref893252013-08-27 11:23:06 +030085 sample.ip = fake_common_samples[k].ip;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090086 if (perf_event__preprocess_sample(&event, machine, &al,
Adrian Huntere44baa32013-08-08 14:32:25 +030087 &sample) < 0)
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090088 goto out;
89
Andi Kleen475eeab2013-09-20 07:40:43 -070090 he = __hists__add_entry(&evsel->hists, &al, NULL,
Namhyung Kim41a4e6e2013-10-31 15:56:03 +090091 NULL, NULL, 1, 1, 0);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090092 if (he == NULL)
93 goto out;
94
95 fake_common_samples[k].thread = al.thread;
96 fake_common_samples[k].map = al.map;
97 fake_common_samples[k].sym = al.sym;
98 }
99
100 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
101 const union perf_event event = {
Adrian Hunteref893252013-08-27 11:23:06 +0300102 .header = {
103 .misc = PERF_RECORD_MISC_USER,
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900104 },
105 };
106
Adrian Hunteref893252013-08-27 11:23:06 +0300107 sample.pid = fake_samples[i][k].pid;
Namhyung Kim13ce34d2014-05-12 09:56:42 +0900108 sample.tid = fake_samples[i][k].pid;
Adrian Hunteref893252013-08-27 11:23:06 +0300109 sample.ip = fake_samples[i][k].ip;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900110 if (perf_event__preprocess_sample(&event, machine, &al,
Adrian Huntere44baa32013-08-08 14:32:25 +0300111 &sample) < 0)
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900112 goto out;
113
Namhyung Kim41a4e6e2013-10-31 15:56:03 +0900114 he = __hists__add_entry(&evsel->hists, &al, NULL,
115 NULL, NULL, 1, 1, 0);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900116 if (he == NULL)
117 goto out;
118
119 fake_samples[i][k].thread = al.thread;
120 fake_samples[i][k].map = al.map;
121 fake_samples[i][k].sym = al.sym;
122 }
123 i++;
124 }
125
126 return 0;
127
128out:
129 pr_debug("Not enough memory for adding a hist entry\n");
130 return -1;
131}
132
133static int find_sample(struct sample *samples, size_t nr_samples,
134 struct thread *t, struct map *m, struct symbol *s)
135{
136 while (nr_samples--) {
137 if (samples->thread == t && samples->map == m &&
138 samples->sym == s)
139 return 1;
140 samples++;
141 }
142 return 0;
143}
144
145static int __validate_match(struct hists *hists)
146{
147 size_t count = 0;
148 struct rb_root *root;
149 struct rb_node *node;
150
151 /*
152 * Only entries from fake_common_samples should have a pair.
153 */
154 if (sort__need_collapse)
155 root = &hists->entries_collapsed;
156 else
157 root = hists->entries_in;
158
159 node = rb_first(root);
160 while (node) {
161 struct hist_entry *he;
162
163 he = rb_entry(node, struct hist_entry, rb_node_in);
164
165 if (hist_entry__has_pairs(he)) {
166 if (find_sample(fake_common_samples,
167 ARRAY_SIZE(fake_common_samples),
168 he->thread, he->ms.map, he->ms.sym)) {
169 count++;
170 } else {
171 pr_debug("Can't find the matched entry\n");
172 return -1;
173 }
174 }
175
176 node = rb_next(node);
177 }
178
179 if (count != ARRAY_SIZE(fake_common_samples)) {
180 pr_debug("Invalid count for matched entries: %zd of %zd\n",
181 count, ARRAY_SIZE(fake_common_samples));
182 return -1;
183 }
184
185 return 0;
186}
187
/* Validate matching on both hists; nonzero (1) when either side fails. */
static int validate_match(struct hists *leader, struct hists *other)
{
	int ret = __validate_match(leader);

	if (!ret)
		ret = __validate_match(other);

	return !!ret;
}
192
/*
 * Check one hists after hists__link().  @idx selects which fake_samples
 * row belongs to this hists: 0 for the leader, 1 for the other evsel.
 *
 * Counters:
 *   count       - total entries walked
 *   count_pair  - entries that have a pair
 *   count_dummy - paired entries matching NEITHER fake_common_samples
 *                 nor this hists' own fake_samples[idx], i.e. dummy
 *                 entries inserted by hists__link() from the other side
 *
 * Returns 0 when all invariants hold, -1 otherwise.
 */
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However every entry
	 * in other hists should have (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			/* not one of our own samples -> must be a dummy */
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			/* in the other hists, an unpaired entry is a bug */
			pr_debug("A entry from the other hists should have pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have a entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		/* leader gets one dummy per unique 'other' entry (one pair collapsed) */
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		/* unpaired leader entries are exactly its own unique samples */
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		/* every entry in the other hists must be paired ... */
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		/* ... and none of them may be a dummy */
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}
265
/* Validate linking on both hists; nonzero (1) when either side fails. */
static int validate_link(struct hists *leader, struct hists *other)
{
	int ret = __validate_link(leader, 0);

	if (!ret)
		ret = __validate_link(other, 1);

	return !!ret;
}
270
271static void print_hists(struct hists *hists)
272{
273 int i = 0;
274 struct rb_root *root;
275 struct rb_node *node;
276
277 if (sort__need_collapse)
278 root = &hists->entries_collapsed;
279 else
280 root = hists->entries_in;
281
282 pr_info("----- %s --------\n", __func__);
283 node = rb_first(root);
284 while (node) {
285 struct hist_entry *he;
286
287 he = rb_entry(node, struct hist_entry, rb_node_in);
288
289 pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
Frederic Weisbeckerb9c51432013-09-11 14:46:56 +0200290 i, thread__comm_str(he->thread), he->ms.map->dso->short_name,
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900291 he->ms.sym->name, he->stat.period);
292
293 i++;
294 node = rb_next(node);
295 }
296}
297
298int test__hists_link(void)
299{
300 int err = -1;
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300301 struct machines machines;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900302 struct machine *machine = NULL;
303 struct perf_evsel *evsel, *first;
Namhyung Kim334fe7a2013-03-11 16:43:12 +0900304 struct perf_evlist *evlist = perf_evlist__new();
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900305
306 if (evlist == NULL)
307 return -ENOMEM;
308
Jiri Olsad8f7bbc2013-01-15 14:39:51 +0100309 err = parse_events(evlist, "cpu-clock");
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900310 if (err)
311 goto out;
Jiri Olsad8f7bbc2013-01-15 14:39:51 +0100312 err = parse_events(evlist, "task-clock");
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900313 if (err)
314 goto out;
315
316 /* default sort order (comm,dso,sym) will be used */
Namhyung Kim55309982013-02-06 14:57:16 +0900317 if (setup_sorting() < 0)
318 goto out;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900319
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300320 machines__init(&machines);
321
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900322 /* setup threads/dso/map/symbols also */
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300323 machine = setup_fake_machine(&machines);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900324 if (!machine)
325 goto out;
326
327 if (verbose > 1)
328 machine__fprintf(machine, stderr);
329
330 /* process sample events */
331 err = add_hist_entries(evlist, machine);
332 if (err < 0)
333 goto out;
334
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -0300335 evlist__for_each(evlist, evsel) {
Namhyung Kimc1fb5652013-10-11 14:15:38 +0900336 hists__collapse_resort(&evsel->hists, NULL);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900337
338 if (verbose > 2)
339 print_hists(&evsel->hists);
340 }
341
342 first = perf_evlist__first(evlist);
343 evsel = perf_evlist__last(evlist);
344
345 /* match common entries */
346 hists__match(&first->hists, &evsel->hists);
347 err = validate_match(&first->hists, &evsel->hists);
348 if (err)
349 goto out;
350
351 /* link common and/or dummy entries */
352 hists__link(&first->hists, &evsel->hists);
353 err = validate_link(&first->hists, &evsel->hists);
354 if (err)
355 goto out;
356
357 err = 0;
358
359out:
360 /* tear down everything */
361 perf_evlist__delete(evlist);
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300362 machines__exit(&machines);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900363
364 return err;
365}