#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"

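/*
 * A fake sample used to fill the hists.  The thread/map/sym fields are
 * filled in once the sample has been resolved and are later used by
 * find_sample() to match hist entries back to these fixtures.
 */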
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
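/*
 * As the per-sample comments below show, the fake machine maps the perf and
 * bash binaries at 0x40000, libc at 0x50000 and the kernel at 0xf0000, with
 * symbols at offsets 700/800/900 within each map.
 */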
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf]   main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf]   cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash]   xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc]   malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = 100, .ip = 0x40000 + 800, },
		/* perf [libc]   malloc() */
		{ .pid = 100, .ip = 0x50000 + 700, },
		/* perf [kernel] page_fault() */
		{ .pid = 100, .ip = 0xf0000 + 800, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = 200, .ip = 0xf0000 + 900, },
		/* bash [libc]   free() */
		{ .pid = 300, .ip = 0x50000 + 800, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = 200, .ip = 0x50000 + 800, },
		/* bash [libc]   malloc() */
		{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = 300, .ip = 0x40000 + 900, },
		/* bash [libc]   realloc() */
		{ .pid = 300, .ip = 0x50000 + 900, },
		/* bash [kernel] page_fault() */
		{ .pid = 300, .ip = 0xf0000 + 800, },
	},
};

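/*
 * Feed the fake samples into the hists of every evsel in the list: each
 * sample is resolved with perf_event__preprocess_sample() and added with
 * __hists__add_entry().  The resolved thread/map/sym are recorded so that
 * the validation helpers below can recognize the entries again.
 */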
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .cpu = 0, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so a total of 9 entries will be in the tree.
	 */
	evlist__for_each(evlist, evsel) {
		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

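/*
 * Return 1 if any of the given samples was resolved to exactly this
 * thread/map/symbol combination, 0 otherwise.
 */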
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

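/* After hists__match(), both hists must pair exactly the common entries. */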
static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

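/*
 * After hists__link(), the leader may contain unpaired entries of its own,
 * but every entry in the other hists must have been given a pair.
 */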
static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

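/* Debug helper: dump every entry in the (collapsed) hist tree. */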
static void print_hists(struct hists *hists)
{
	int i = 0;
	struct rb_root *root;
	struct rb_node *node;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	pr_info("----- %s --------\n", __func__);
	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
			i, thread__comm_str(he->thread), he->ms.map->dso->short_name,
			he->ms.sym->name, he->stat.period);

		i++;
		node = rb_next(node);
	}
}

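/*
 * The test itself: create two evsels (cpu-clock and task-clock), fill their
 * hists from the fake samples above, then check that hists__match() pairs
 * up the common entries and that hists__link() adds dummy pairs for the rest.
 */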
int test__hists_link(void)
{
	int err = -1;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock");
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists__collapse_resort(&evsel->hists, NULL);

		if (verbose > 2)
			print_hists(&evsel->hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	/* match common entries */
	hists__match(&first->hists, &evsel->hists);
	err = validate_match(&first->hists, &evsel->hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(&first->hists, &evsel->hists);
	err = validate_link(&first->hists, &evsel->hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}