blob: 818cb022fcb61eafbe46ef58f9a703a42b33cba1 [file] [log] [blame]
Xiao Guangrong0007ece2012-09-17 16:31:14 +08001#include <math.h>
Xiao Guangrong0007ece2012-09-17 16:31:14 +08002#include "stat.h"
Jiri Olsa24e34f62015-06-26 11:29:16 +02003#include "evlist.h"
Jiri Olsae2f56da2015-06-04 15:50:55 +02004#include "evsel.h"
Jiri Olsa24e34f62015-06-26 11:29:16 +02005#include "thread_map.h"
Xiao Guangrong0007ece2012-09-17 16:31:14 +08006
7void update_stats(struct stats *stats, u64 val)
8{
9 double delta;
10
11 stats->n++;
12 delta = val - stats->mean;
13 stats->mean += delta / stats->n;
14 stats->M2 += delta*(val - stats->mean);
David Ahernffe4f3c2013-08-02 14:05:40 -060015
16 if (val > stats->max)
17 stats->max = val;
18
19 if (val < stats->min)
20 stats->min = val;
Xiao Guangrong0007ece2012-09-17 16:31:14 +080021}
22
/* Return the running mean accumulated by update_stats(). */
double avg_stats(struct stats *stats)
{
	return stats->mean;
}
27
28/*
29 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
30 *
31 * (\Sum n_i^2) - ((\Sum n_i)^2)/n
32 * s^2 = -------------------------------
33 * n - 1
34 *
35 * http://en.wikipedia.org/wiki/Stddev
36 *
37 * The std dev of the mean is related to the std dev by:
38 *
39 * s
40 * s_mean = -------
41 * sqrt(n)
42 *
43 */
44double stddev_stats(struct stats *stats)
45{
46 double variance, variance_mean;
47
David Ahern45528f72013-05-25 18:24:48 -060048 if (stats->n < 2)
Xiao Guangrong0007ece2012-09-17 16:31:14 +080049 return 0.0;
50
51 variance = stats->M2 / (stats->n - 1);
52 variance_mean = variance / stats->n;
53
54 return sqrt(variance_mean);
55}
56
/*
 * Express @stddev as a percentage of @avg.  A zero average yields 0.0
 * rather than dividing by zero.
 */
double rel_stddev_stats(double stddev, double avg)
{
	if (!avg)
		return 0.0;

	return 100.0 * stddev / avg;
}
Jiri Olsae2f56da2015-06-04 15:50:55 +020066
67bool __perf_evsel_stat__is(struct perf_evsel *evsel,
68 enum perf_stat_evsel_id id)
69{
70 struct perf_stat *ps = evsel->priv;
71
72 return ps->id == id;
73}
74
/*
 * Table mapping each perf_stat_evsel_id to the event-name string it is
 * recognized by.  The ID() macro stringifies the raw event name and
 * places it at the index of the matching enum value, so lookup in
 * perf_stat_evsel_id_init() is a simple strcmp scan over this array.
 */
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
};
#undef ID
84
85void perf_stat_evsel_id_init(struct perf_evsel *evsel)
86{
87 struct perf_stat *ps = evsel->priv;
88 int i;
89
90 /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
91
92 for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
93 if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
94 ps->id = i;
95 break;
96 }
97 }
98}
Jiri Olsaa9a3a4d2015-06-14 10:19:26 +020099
Jiri Olsaa6fa0032015-06-26 11:29:11 +0200100struct perf_counts *perf_counts__new(int ncpus, int nthreads)
Jiri Olsa9df38e82015-06-14 10:19:27 +0200101{
Jiri Olsaa8e02322015-06-26 11:29:10 +0200102 struct perf_counts *counts = zalloc(sizeof(*counts));
Jiri Olsa9df38e82015-06-14 10:19:27 +0200103
Jiri Olsaa8e02322015-06-26 11:29:10 +0200104 if (counts) {
Jiri Olsa57b28912015-06-26 11:29:12 +0200105 struct xyarray *values;
Jiri Olsaa8e02322015-06-26 11:29:10 +0200106
Jiri Olsa57b28912015-06-26 11:29:12 +0200107 values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
108 if (!values) {
Jiri Olsaa8e02322015-06-26 11:29:10 +0200109 free(counts);
110 return NULL;
111 }
112
Jiri Olsa57b28912015-06-26 11:29:12 +0200113 counts->values = values;
Jiri Olsaa8e02322015-06-26 11:29:10 +0200114 }
115
116 return counts;
Jiri Olsa9df38e82015-06-14 10:19:27 +0200117}
118
119void perf_counts__delete(struct perf_counts *counts)
120{
Jiri Olsaa8e02322015-06-26 11:29:10 +0200121 if (counts) {
Jiri Olsa57b28912015-06-26 11:29:12 +0200122 xyarray__delete(counts->values);
Jiri Olsaa8e02322015-06-26 11:29:10 +0200123 free(counts);
124 }
Jiri Olsa9df38e82015-06-14 10:19:27 +0200125}
126
/* Reset all per-cpu/per-thread values in @counts via xyarray__reset(). */
static void perf_counts__reset(struct perf_counts *counts)
{
	xyarray__reset(counts->values);
}
131
/* Reset the evsel's counter values (counts must have been allocated). */
void perf_evsel__reset_counts(struct perf_evsel *evsel)
{
	perf_counts__reset(evsel->counts);
}
136
Jiri Olsaa6fa0032015-06-26 11:29:11 +0200137int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)
Jiri Olsaa9a3a4d2015-06-14 10:19:26 +0200138{
Jiri Olsaa6fa0032015-06-26 11:29:11 +0200139 evsel->counts = perf_counts__new(ncpus, nthreads);
Jiri Olsaa9a3a4d2015-06-14 10:19:26 +0200140 return evsel->counts != NULL ? 0 : -ENOMEM;
141}
142
/* Free the evsel's counts storage and clear the pointer (safe if NULL). */
void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->counts);
	evsel->counts = NULL;
}
Jiri Olsa9689edf2015-06-26 11:29:14 +0200148
149void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
150{
151 int i;
152 struct perf_stat *ps = evsel->priv;
153
154 for (i = 0; i < 3; i++)
155 init_stats(&ps->res_stats[i]);
156
157 perf_stat_evsel_id_init(evsel);
158}
159
160int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
161{
162 evsel->priv = zalloc(sizeof(struct perf_stat));
163 if (evsel->priv == NULL)
164 return -ENOMEM;
165 perf_evsel__reset_stat_priv(evsel);
166 return 0;
167}
168
/* Free the evsel's stat private data and NULL the pointer (zfree). */
void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
}
Jiri Olsaa9395122015-06-26 11:29:15 +0200173
174int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
175 int ncpus, int nthreads)
176{
177 struct perf_counts *counts;
178
179 counts = perf_counts__new(ncpus, nthreads);
180 if (counts)
181 evsel->prev_raw_counts = counts;
182
183 return counts ? 0 : -ENOMEM;
184}
185
/* Free the previous-raw-counts storage and clear the pointer (safe if NULL). */
void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
Jiri Olsa24e34f62015-06-26 11:29:16 +0200191
192int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
193{
194 struct perf_evsel *evsel;
195 int nthreads = thread_map__nr(evlist->threads);
196
197 evlist__for_each(evlist, evsel) {
198 int ncpus = perf_evsel__nr_cpus(evsel);
199
200 if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
201 perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
202 (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
203 goto out_free;
204 }
205
206 return 0;
207
208out_free:
209 perf_evlist__free_stats(evlist);
210 return -1;
211}
212
213void perf_evlist__free_stats(struct perf_evlist *evlist)
214{
215 struct perf_evsel *evsel;
216
217 evlist__for_each(evlist, evsel) {
218 perf_evsel__free_stat_priv(evsel);
219 perf_evsel__free_counts(evsel);
220 perf_evsel__free_prev_raw_counts(evsel);
221 }
222}
223
224void perf_evlist__reset_stats(struct perf_evlist *evlist)
225{
226 struct perf_evsel *evsel;
227
228 evlist__for_each(evlist, evsel) {
229 perf_evsel__reset_stat_priv(evsel);
230 perf_evsel__reset_counts(evsel);
231 }
232}