blob: 89c8e1604ca73ab277b4cf988d08400c527f315f [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Namhyung Kimd723a552013-03-15 14:58:11 +09002#include "evlist.h"
3#include "evsel.h"
4#include "thread_map.h"
5#include "cpumap.h"
6#include "tests.h"
7
Arnaldo Carvalho de Meloa43783a2017-04-18 10:46:11 -03008#include <errno.h>
Namhyung Kimd723a552013-03-15 14:58:11 +09009#include <signal.h>
10
/*
 * Communication channel between the signal handlers below and the polling
 * loop in test__task_exit().  Both variables are stored to from an async
 * signal handler and read from normal control flow, so they must be
 * volatile sig_atomic_t: that is the only object type the C standard
 * guarantees can be safely written from a signal handler and observed by
 * the interrupted code (plain int may legally be cached in a register
 * across the signal-delivery window).
 */
static volatile sig_atomic_t exited;	/* set once the workload terminated (or exec failed) */
static volatile sig_atomic_t nr_exit;	/* count of PERF_RECORD_EXIT events; -1 on exec failure */
13
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -030014static void sig_handler(int sig __maybe_unused)
Namhyung Kimd723a552013-03-15 14:58:11 +090015{
16 exited = 1;
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -030017}
Namhyung Kimd723a552013-03-15 14:58:11 +090018
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -030019/*
20 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
21 * we asked by setting its exec_error to this handler.
22 */
23static void workload_exec_failed_signal(int signo __maybe_unused,
24 siginfo_t *info __maybe_unused,
25 void *ucontext __maybe_unused)
26{
27 exited = 1;
28 nr_exit = -1;
Namhyung Kimd723a552013-03-15 14:58:11 +090029}
30
/*
 * This test will start a workload that does nothing then it checks
 * if the number of exit event reported by the kernel is 1 or not
 * in order to check the kernel returns correct number of event.
 *
 * Returns 0 on success, -1 (or a negative errno) on failure, following
 * the usual perf test convention.
 */
int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	/* Monitor any uid; mmap-based recording of the forked workload. */
	struct target target = {
		.uid = UINT_MAX,
		.uses_mmap = true,
	};
	/* The workload: /bin/true, i.e. a process that exits immediately. */
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct cpu_map *cpus;
	struct thread_map *threads;

	/* Install before forking so the child's exit cannot be missed. */
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	/*
	 * Ownership of the maps was transferred to the evlist above
	 * (presumably via refcount get — see perf_evlist__set_maps);
	 * clear the locals so out_free_maps does not drop an extra
	 * reference on them.
	 */
	cpus = NULL;
	threads = NULL;

	/* Fork the workload stopped; SIGUSR1 on exec failure (see handler). */
	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/* Request task (fork/exit) records from the single default event. */
	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
#ifdef __s390x__
	/* s390x needs a much higher sample frequency for this test to work. */
	evsel->attr.sample_freq = 1000000;
#else
	evsel->attr.sample_freq = 1;
#endif
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	/* Wake up the poll below on every single event. */
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* 128 pages per mmap, overwrite mode. */
	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/* Let the stopped child exec "true" and run to completion. */
	perf_evlist__start_workload(evlist);

retry:
	/* Drain mmap 0 (the only one: dummy cpu map, single thread). */
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_evlist__mmap_consume(evlist, 0);
	}

	/*
	 * Keep polling until SIGCHLD fired AND at least one EXIT record
	 * was seen (nr_exit == -1, set on exec failure, also terminates
	 * the loop and then fails the check below).
	 */
	if (!exited || !nr_exit) {
		perf_evlist__poll(evlist, -1);
		goto retry;
	}

	/* Exactly one EXIT record is the pass condition. */
	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	/* Only reached with live cpus/threads if set_maps was not called. */
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}