#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

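/*
 * Running statistics, maintained one sample at a time with Welford's
 * online algorithm: only the count, the mean and the sum of squared
 * deltas (M2) are kept, no sample history.  stddev_stats() below turns
 * M2 into the standard deviation of the mean.
 */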
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
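
/*
 * Sketch of how the helpers above are typically combined when an
 * "avg ( +- x.xx% )" style line is printed (the stats object name is
 * illustrative only):
 *
 *	avg    = avg_stats(&some_stats);
 *	stddev = stddev_stats(&some_stats);
 *	pct    = rel_stddev_stats(stddev, avg);
 */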

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat *ps = evsel->priv;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
};
#undef ID
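
/*
 * id_str[] maps each perf_stat_evsel_id to the event name it is
 * recognized by, e.g. an evsel named "cpu/cycles-t/" is classified as
 * PERF_STAT_EVSEL_ID__CYCLES_IN_TX.  perf_stat_evsel_id_init() below
 * performs the reverse lookup by comparing perf_evsel__name() against
 * every entry.
 */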

void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat *ps = evsel->priv;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat *ps = evsel->priv;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}
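
/*
 * res_stats[0..2] hold running statistics (via update_stats()) for the
 * aggregated value, enabled time and running time of the event;
 * perf_stat_process_counter() below feeds aggr.values[0..2] into them.
 */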

int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	if (evsel->priv == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}

int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
				      int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
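
/*
 * prev_raw_counts remembers the last raw values that were read; it is
 * consumed by perf_evsel__compute_deltas() (evsel.c) so that each
 * processing pass operates on the delta since the previous read rather
 * than on the absolute counter value.
 */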

int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}
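
/*
 * Rough usage sketch for the helpers above (not lifted verbatim from
 * builtin-stat.c; "stat_config" and the read step are assumed):
 *
 *	if (perf_evlist__alloc_stats(evlist, false) < 0)
 *		return -ENOMEM;
 *	... read counters into each evsel's ->counts ...
 *	evlist__for_each(evlist, counter)
 *		perf_stat_process_counter(&stat_config, counter);
 *	perf_evlist__free_stats(evlist);
 */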

static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	s = cpu_map__get_socket(cpus, cpu);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
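
/*
 * A per_pkg event (e.g. an uncore event counting per package) must be
 * aggregated only once per socket even though it is read on every CPU.
 * check_per_pkg() tracks the sockets already seen in a bitmask and sets
 * *skip for any further CPU on the same socket; the caller then
 * substitutes zeroes for the skipped values.
 */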

static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->values, cpu);
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
	default:
		break;
	}

	return 0;
}
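
/*
 * For the per-entity modes (AGGR_NONE, AGGR_SOCKET, AGGR_CORE,
 * AGGR_THREAD) the scaled value is kept in place for later per-cpu,
 * per-socket, per-core or per-thread printing; AGGR_GLOBAL instead sums
 * the raw val/ena/run into evsel->counts->aggr, and the delta/scaling
 * for that sum is applied once in perf_stat_process_counter().
 */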

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}
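
/*
 * perf_stat_process_counter() is the entry point once a counter has
 * been read: it resets the aggregate, walks every cpu/thread value via
 * process_counter_maps() and, for AGGR_GLOBAL, computes the delta,
 * scales the sum, updates res_stats[] and records the shadow stats used
 * for derived metrics at printout time.
 */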

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;
	init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, count, 0);

	return 0;
}