blob: 01a5ba2788c604c8c6b6c6e9f0a0364444f90da0 [file] [log] [blame]
Namhyung Kimd723a552013-03-15 14:58:11 +09001#include "evlist.h"
2#include "evsel.h"
3#include "thread_map.h"
4#include "cpumap.h"
5#include "tests.h"
6
7#include <signal.h>
8
/* Set to 1 by the signal handlers once the forked workload has terminated. */
static int exited;
/* Count of PERF_RECORD_EXIT events seen; forced to -1 if the fork failed. */
static int nr_exit;
11
/*
 * SIGCHLD handler: the workload child terminated, so flag the main loop
 * in test__task_exit() to stop polling once it has drained the ring buffer.
 */
static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}
Namhyung Kimd723a552013-03-15 14:58:11 +090016
/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to this handler.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info __maybe_unused,
					void *ucontext __maybe_unused)
{
	/*
	 * Mark the run as finished and poison the exit-event counter so the
	 * final "nr_exit != 1" check in test__task_exit() reports failure.
	 */
	exited = 1;
	nr_exit = -1;
}
28
/*
 * This test will start a workload that does nothing then it checks
 * if the number of exit event reported by the kernel is 1 or not
 * in order to check the kernel returns correct number of event.
 *
 * Returns 0 on success, -1 (or a negative errno) on failure.
 */
int test__task_exit(int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct target target = {
		.uid		= UINT_MAX,	/* don't filter by uid */
		.uses_mmap	= true,
	};
	/* Workload: /bin/true, i.e. a child that exits immediately. */
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct cpu_map *cpus;
	struct thread_map *threads;

	/* Get notified (via the 'exited' flag) when the workload dies. */
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	/*
	 * The evlist now owns the maps; NULL the locals so the
	 * out_free_maps path below doesn't drop a second reference.
	 */
	cpus	= NULL;
	threads = NULL;

	/* Fork the (stopped) workload; exec errors arrive via SIGUSR1. */
	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Tune the (single, default) event: ask only for task (fork/exit)
	 * records, no sampling, no inheritance, and wake up the poll()
	 * below on every single event so none are missed.
	 */
	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
	evsel->attr.sample_freq = 0;
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Let the stopped child exec the workload. */
	perf_evlist__start_workload(evlist);

retry:
	/* Drain the ring buffer, counting PERF_RECORD_EXIT records. */
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_evlist__mmap_consume(evlist, 0);
	}

	/*
	 * Keep polling until the child has exited AND at least one exit
	 * record was read: SIGCHLD may arrive before the kernel writes
	 * the PERF_RECORD_EXIT into the ring buffer. A fork failure sets
	 * nr_exit = -1, which also breaks out of this loop.
	 */
	if (!exited || !nr_exit) {
		perf_evlist__poll(evlist, -1);
		goto retry;
	}

	/* Exactly one exit event is expected for the single-threaded child. */
	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	/* Both are NULL unless map creation itself failed; put(NULL) is safe. */
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}