blob: 7885b1d324e49750cbd7c632b4e11dc7240bdede [file] [log] [blame]
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +09001#include "perf.h"
2#include "tests.h"
3#include "debug.h"
4#include "symbol.h"
5#include "sort.h"
6#include "evsel.h"
7#include "evlist.h"
8#include "machine.h"
9#include "thread.h"
10#include "parse-events.h"
Namhyung Kim6e344a92014-04-25 12:28:13 +090011#include "hists_common.h"
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090012
/*
 * One fake sample event.  pid/ip are inputs (constants from
 * hists_common.h); thread/map/sym are filled in by add_hist_entries()
 * after symbol resolution and are used later by the validators to
 * recognize which hist entry came from which sample.
 */
struct sample {
	u32 pid;		/* fake pid, e.g. FAKE_PID_BASH */
	u64 ip;			/* fake ip, e.g. FAKE_IP_LIBC_MALLOC */
	struct thread *thread;	/* resolved thread (set after insertion) */
	struct map *map;	/* resolved map (set after insertion) */
	struct symbol *sym;	/* resolved symbol (set after insertion) */
};
20
/*
 * Samples fed to *both* evsels - after hists__match() exactly these
 * entries are expected to be paired.  For the numbers, see
 * hists_common.c.
 */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};
34
/*
 * Samples unique to each evsel, indexed by the evsel's position in
 * the evlist.  Note that the second evsel's "bash [libc] malloc"
 * sample duplicates one of the common samples and so collapses into
 * it, which the link validator accounts for.
 */
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};
61
62static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
63{
64 struct perf_evsel *evsel;
65 struct addr_location al;
66 struct hist_entry *he;
Namhyung Kimfd36f3d2015-12-23 02:06:58 +090067 struct perf_sample sample = { .period = 1, .weight = 1, };
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090068 size_t i = 0, k;
69
70 /*
71 * each evsel will have 10 samples - 5 common and 5 distinct.
72 * However the second evsel also has a collapsed entry for
73 * "bash [libc] malloc" so total 9 entries will be in the tree.
74 */
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -030075 evlist__for_each(evlist, evsel) {
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -030076 struct hists *hists = evsel__hists(evsel);
77
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090078 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
79 const union perf_event event = {
Adrian Hunteref893252013-08-27 11:23:06 +030080 .header = {
81 .misc = PERF_RECORD_MISC_USER,
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090082 },
83 };
84
Arnaldo Carvalho de Melo473398a2016-03-22 18:23:43 -030085 sample.cpumode = PERF_RECORD_MISC_USER;
Adrian Hunteref893252013-08-27 11:23:06 +030086 sample.pid = fake_common_samples[k].pid;
Namhyung Kim13ce34d2014-05-12 09:56:42 +090087 sample.tid = fake_common_samples[k].pid;
Adrian Hunteref893252013-08-27 11:23:06 +030088 sample.ip = fake_common_samples[k].ip;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090089 if (perf_event__preprocess_sample(&event, machine, &al,
Adrian Huntere44baa32013-08-08 14:32:25 +030090 &sample) < 0)
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090091 goto out;
92
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -030093 he = __hists__add_entry(hists, &al, NULL,
Namhyung Kimfd36f3d2015-12-23 02:06:58 +090094 NULL, NULL, &sample, true);
Arnaldo Carvalho de Melob91fc392015-04-06 20:43:22 -030095 if (he == NULL) {
96 addr_location__put(&al);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090097 goto out;
Arnaldo Carvalho de Melob91fc392015-04-06 20:43:22 -030098 }
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +090099
100 fake_common_samples[k].thread = al.thread;
101 fake_common_samples[k].map = al.map;
102 fake_common_samples[k].sym = al.sym;
103 }
104
105 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
106 const union perf_event event = {
Adrian Hunteref893252013-08-27 11:23:06 +0300107 .header = {
108 .misc = PERF_RECORD_MISC_USER,
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900109 },
110 };
111
Adrian Hunteref893252013-08-27 11:23:06 +0300112 sample.pid = fake_samples[i][k].pid;
Namhyung Kim13ce34d2014-05-12 09:56:42 +0900113 sample.tid = fake_samples[i][k].pid;
Adrian Hunteref893252013-08-27 11:23:06 +0300114 sample.ip = fake_samples[i][k].ip;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900115 if (perf_event__preprocess_sample(&event, machine, &al,
Adrian Huntere44baa32013-08-08 14:32:25 +0300116 &sample) < 0)
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900117 goto out;
118
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300119 he = __hists__add_entry(hists, &al, NULL,
Namhyung Kimfd36f3d2015-12-23 02:06:58 +0900120 NULL, NULL, &sample, true);
Arnaldo Carvalho de Melob91fc392015-04-06 20:43:22 -0300121 if (he == NULL) {
122 addr_location__put(&al);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900123 goto out;
Arnaldo Carvalho de Melob91fc392015-04-06 20:43:22 -0300124 }
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900125
126 fake_samples[i][k].thread = al.thread;
127 fake_samples[i][k].map = al.map;
128 fake_samples[i][k].sym = al.sym;
129 }
130 i++;
131 }
132
133 return 0;
134
135out:
136 pr_debug("Not enough memory for adding a hist entry\n");
137 return -1;
138}
139
140static int find_sample(struct sample *samples, size_t nr_samples,
141 struct thread *t, struct map *m, struct symbol *s)
142{
143 while (nr_samples--) {
144 if (samples->thread == t && samples->map == m &&
145 samples->sym == s)
146 return 1;
147 samples++;
148 }
149 return 0;
150}
151
152static int __validate_match(struct hists *hists)
153{
154 size_t count = 0;
155 struct rb_root *root;
156 struct rb_node *node;
157
158 /*
159 * Only entries from fake_common_samples should have a pair.
160 */
161 if (sort__need_collapse)
162 root = &hists->entries_collapsed;
163 else
164 root = hists->entries_in;
165
166 node = rb_first(root);
167 while (node) {
168 struct hist_entry *he;
169
170 he = rb_entry(node, struct hist_entry, rb_node_in);
171
172 if (hist_entry__has_pairs(he)) {
173 if (find_sample(fake_common_samples,
174 ARRAY_SIZE(fake_common_samples),
175 he->thread, he->ms.map, he->ms.sym)) {
176 count++;
177 } else {
178 pr_debug("Can't find the matched entry\n");
179 return -1;
180 }
181 }
182
183 node = rb_next(node);
184 }
185
186 if (count != ARRAY_SIZE(fake_common_samples)) {
187 pr_debug("Invalid count for matched entries: %zd of %zd\n",
188 count, ARRAY_SIZE(fake_common_samples));
189 return -1;
190 }
191
192 return 0;
193}
194
/* Validate matching on both hists; non-zero if either check fails. */
static int validate_match(struct hists *leader, struct hists *other)
{
	if (__validate_match(leader))
		return 1;

	return __validate_match(other) ? 1 : 0;
}
199
/*
 * Verify hists__link() results on one hists tree.
 *
 * @hists: the tree to walk
 * @idx:   0 for the leader hists, 1 for the other hists
 */
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;	/* total entries walked */
	size_t count_pair = 0;	/* entries that have a pair */
	size_t count_dummy = 0;	/* paired entries matching no known sample */
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However every entry
	 * in other hists should have (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			/*
			 * A paired entry that matches neither the common
			 * samples nor this hists' own distinct samples must
			 * be a dummy entry inserted from the other hists.
			 */
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			/* in the other hists, an unpaired entry is an error */
			pr_debug("A entry from the other hists should have pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have a entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		/* dummies = other's entries minus the one that collapsed */
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		/* unpaired leader entries are exactly its distinct samples */
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}
272
/* Validate linking on both hists; non-zero if either check fails. */
static int validate_link(struct hists *leader, struct hists *other)
{
	if (__validate_link(leader, 0))
		return 1;

	return __validate_link(other, 1) ? 1 : 0;
}
277
Arnaldo Carvalho de Melo721a1f52015-11-19 12:01:48 -0300278int test__hists_link(int subtest __maybe_unused)
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900279{
280 int err = -1;
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300281 struct hists *hists, *first_hists;
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300282 struct machines machines;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900283 struct machine *machine = NULL;
284 struct perf_evsel *evsel, *first;
Namhyung Kim334fe7a2013-03-11 16:43:12 +0900285 struct perf_evlist *evlist = perf_evlist__new();
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900286
287 if (evlist == NULL)
288 return -ENOMEM;
289
Jiri Olsab39b8392015-04-22 21:10:16 +0200290 err = parse_events(evlist, "cpu-clock", NULL);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900291 if (err)
292 goto out;
Jiri Olsab39b8392015-04-22 21:10:16 +0200293 err = parse_events(evlist, "task-clock", NULL);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900294 if (err)
295 goto out;
296
Wang Nanb0500c12016-01-11 13:48:03 +0000297 err = TEST_FAIL;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900298 /* default sort order (comm,dso,sym) will be used */
Namhyung Kim40184c42015-12-23 02:07:01 +0900299 if (setup_sorting(NULL) < 0)
Namhyung Kim55309982013-02-06 14:57:16 +0900300 goto out;
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900301
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300302 machines__init(&machines);
303
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900304 /* setup threads/dso/map/symbols also */
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300305 machine = setup_fake_machine(&machines);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900306 if (!machine)
307 goto out;
308
309 if (verbose > 1)
310 machine__fprintf(machine, stderr);
311
312 /* process sample events */
313 err = add_hist_entries(evlist, machine);
314 if (err < 0)
315 goto out;
316
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -0300317 evlist__for_each(evlist, evsel) {
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300318 hists = evsel__hists(evsel);
319 hists__collapse_resort(hists, NULL);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900320
321 if (verbose > 2)
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300322 print_hists_in(hists);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900323 }
324
325 first = perf_evlist__first(evlist);
326 evsel = perf_evlist__last(evlist);
327
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300328 first_hists = evsel__hists(first);
329 hists = evsel__hists(evsel);
330
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900331 /* match common entries */
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300332 hists__match(first_hists, hists);
333 err = validate_match(first_hists, hists);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900334 if (err)
335 goto out;
336
337 /* link common and/or dummy entries */
Arnaldo Carvalho de Melo4ea062ed2014-10-09 13:13:41 -0300338 hists__link(first_hists, hists);
339 err = validate_link(first_hists, hists);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900340 if (err)
341 goto out;
342
343 err = 0;
344
345out:
346 /* tear down everything */
347 perf_evlist__delete(evlist);
Namhyung Kimf21d1812014-05-12 14:43:18 +0900348 reset_output_field();
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -0300349 machines__exit(&machines);
Namhyung Kimf8ebb0c2012-12-10 17:29:57 +0900350
351 return err;
352}