/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * The Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption;
 * here we only expose the free running energy consumption counters
 * (pp0, pkg, dram, pp1, psys).
 *
 * Each of those counters increments in an energy unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to RAPL event mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *    perf code: 0x1
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *    perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *    perf code: 0x3
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *	  event: rapl_energy_gpu
 *    perf code: 0x4
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *	  event: rapl_energy_psys
 *    perf code: 0x5
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must scale the counts, e.g. with ldexp(raw_count, -32) to get
 * Joules, then divide by the measurement duration to get Watts.
 */
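
/*
 * For illustration only (user-space, not part of this driver): a
 * minimal sketch of the conversion described above, turning a raw
 * 32.32 fixed-point count accumulated over an interval into Joules
 * and Watts. The helper names are made up for the example.
 *
 *	#include <math.h>
 *
 *	static double rapl_count_to_joules(unsigned long long raw)
 *	{
 *		return ldexp((double)raw, -32);
 *	}
 *
 *	static double rapl_count_to_watts(unsigned long long raw,
 *					  double seconds)
 *	{
 *		return rapl_count_to_joules(raw) / seconds;
 *	}
 */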

#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};

/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell and later clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
				 1<<RAPL_IDX_PKG_NRG_STAT|\
				 1<<RAPL_IDX_RAM_NRG_STAT|\
				 1<<RAPL_IDX_PP1_NRG_STAT|\
				 1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL

#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)

#define RAPL_CNTR_WIDTH	32

#define RAPL_EVENT_ATTR_STR(_name, v, str)				\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= str,						\
};

struct rapl_pmu {
	raw_spinlock_t		lock;
	int			n_active;
	int			cpu;
	struct list_head	active_list;
	struct pmu		*pmu;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;
};

struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		maxpkg;
	struct rapl_pmu		*pmus[];
};

 /* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;

	rdmsrl(event->hw.event_base, raw);
	return raw;
}

static inline u64 rapl_scale(u64 v, int cfg)
{
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * Scale the delta to the smallest unit (1/2^32 Joules):
	 * users must then scale back with ldexp(count, -32) to get
	 * Joules, and divide by the time delta to get Watts.
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
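
/*
 * Worked example of the scaling above (for illustration): with the
 * SandyBridge default unit of 1/2^16 Joules, rapl_hw_unit[] holds 16,
 * so a raw delta of 1 becomes 1 << (32 - 16) = 65536 increments of
 * 2^-32 Joules, i.e. the same 2^-16 Joules, just renormalized so that
 * every domain reports in the common 2^-32 Joule unit.
 */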

static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
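
/*
 * Note on the shift pair above (for illustration): with shift = 32,
 * (new << 32) - (prev << 32) followed by the arithmetic >> 32
 * truncates both values to the 32-bit counter width before
 * subtracting, so a counter wrap (e.g. prev = 0xffffffff, new = 0x1)
 * still yields the correct small positive delta (2 in that example).
 */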

static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}

static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/*
	 * check event is known (determines counter)
	 */
	switch (cfg) {
	case INTEL_RAPL_PP0:
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PKG:
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case INTEL_RAPL_RAM:
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PP1:
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;
		break;
	default:
		return -EINVAL;
	}
	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
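
/*
 * For illustration only (user-space, not part of this driver): a
 * minimal sketch of opening the package energy counter as a
 * system-wide event on CPU 0. The PMU type is assigned dynamically
 * and must be read from /sys/bus/event_source/devices/power/type;
 * power_pmu_type below stands in for that value, and error handling
 * is omitted.
 *
 *	struct perf_event_attr attr = {
 *		.type   = power_pmu_type,	(from the sysfs file above)
 *		.size   = sizeof(attr),
 *		.config = 0x2,			(INTEL_RAPL_PKG)
 *	};
 *	unsigned long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 *	sleep(1);
 *	read(fd, &count, sizeof(count));	(raw 2^-32 Joule units)
 */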

static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}

static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 * (the scale below is 2^-32 Joules ~= 0.233 nJ)
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
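
/*
 * Illustrative sketch of how tools consume the attributes above (not
 * part of this driver): perf reads, e.g.,
 *
 *	/sys/bus/event_source/devices/power/events/energy-pkg	 (event=0x02)
 *	/sys/bus/event_source/devices/power/events/energy-pkg.scale
 *	/sys/bus/event_source/devices/power/events/energy-pkg.unit
 *
 * and reports raw_count * scale in the given unit, so
 * "perf stat -a -e power/energy-pkg/ -- sleep 1" prints Joules.
 */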

static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};

static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};

static struct attribute *rapl_events_knl_attr[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};

static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}

static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;

		raw_spin_lock_init(&pmu->lock);
		INIT_LIST_HEAD(&pmu->active_list);
		pmu->pmu = &rapl_pmus->pmu;
		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
		rapl_hrtimer_init(pmu);

		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
	}

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
	pmu->cpu = cpu;
	return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
	u64 msr_rapl_power_unit_bits;
	int i;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
		return -1;
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

	/*
	 * The DRAM domain on HSW servers and KNL has a fixed energy unit
	 * which can be different from the unit in the power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
	 */
	if (apply_quirk)
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use a reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec.
	 * Divide the interval by 2 to avoid lockstep (2 * 100).
	 * If the hw unit is 32, then we use 2 ms (1/200/2).
	 */
	rapl_timer_ms = 2;
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
	}
	return 0;
}
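
/*
 * Worked example of the timer calculation above (for illustration):
 * with the common energy unit of 2^-16 Joules (rapl_hw_unit[0] == 16),
 * the 32-bit counter wraps after 2^32 * 2^-16 = 65536 Joules, which at
 * the 200W reference takes ~328 seconds. The formula yields
 * 5 ms * 2^(32 - 16 - 1) = 163840 ms, i.e. roughly half of that wrap
 * time, so the hrtimer always samples well before an overflow.
 */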

static void __init rapl_advertise(void)
{
	int i;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
		}
	}
}

static void cleanup_rapl_pmus(void)
{
	int i;

	for (i = 0; i < rapl_pmus->maxpkg; i++)
		kfree(rapl_pmus->pmus[i]);
	kfree(rapl_pmus);
}

static int __init init_rapl_pmus(void)
{
	int maxpkg = topology_max_packages();
	size_t size;

	size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->maxpkg		= maxpkg;
	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
	rapl_pmus->pmu.add		= rapl_pmu_event_add;
	rapl_pmus->pmu.del		= rapl_pmu_event_del;
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
	rapl_pmus->pmu.module		= THIS_MODULE;
	return 0;
}

#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
	bool apply_quirk;
	int cntr_mask;
	struct attribute **attrs;
};

static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_KNL,
	.attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs = rapl_events_skl_attr,
};

static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,	  snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,	  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,	  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,	  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,	  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON,	  hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);

static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;
	bool apply_quirk;
	int ret;

	id = x86_match_cpu(rapl_cpu_match);
	if (!id)
		return -ENODEV;

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);