/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * The Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013).
 *
 * RAPL provides more controls than just reporting energy consumption;
 * here we only expose the free running energy consumption counters
 * (pp0, pkg, dram, gpu, psys).
 *
 * Each of those counters increments in an energy unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *    perf code: 0x1
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *    perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *    perf code: 0x3
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *	  event: rapl_energy_gpu
 *    perf code: 0x4
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *	  event: rapl_energy_psys
 *    perf code: 0x5
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32),
 * i.e. in units of 2^-32 Joules. Tools convert a count to Joules
 * with a function such as ldexp(raw_count, -32) and to Watts by
 * dividing by the duration of the measurement.
 */
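
/*
 * Illustrative user-space conversion (not part of this driver; the
 * variable names are hypothetical). A tool sampling these counters
 * could convert a raw delta like:
 *
 *	double joules = ldexp(raw_count, -32);    (2^-32 Joules per unit)
 *	double watts  = joules / elapsed_seconds;
 */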

#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};

/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell-class clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
			  1<<RAPL_IDX_PKG_NRG_STAT|\
			  1<<RAPL_IDX_RAM_NRG_STAT|\
			  1<<RAPL_IDX_PP1_NRG_STAT|\
			  1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL

#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
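
/*
 * The macro above only generates the sysfs "format" files. For example,
 * the DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7") instance further
 * down makes /sys/bus/event_source/devices/power/format/event report
 * "config:0-7", telling tools which attr->config bits select the RAPL event.
 */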

#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
};

struct rapl_pmu {
	raw_spinlock_t		lock;
	int			n_active;
	int			cpu;
	struct list_head	active_list;
	struct pmu		*pmu;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;
};

struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		maxpkg;
	struct rapl_pmu		*pmus[];
};

/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for
	 * non-existent mappings in the topology map.
	 */
	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;
	rdmsrl(event->hw.event_base, raw);
	return raw;
}

static inline u64 rapl_scale(u64 v, int cfg)
{
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * scale delta to smallest unit (1/2^32)
	 * users must then scale back: count * 1/2^32 to get Joules,
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
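
/*
 * Worked example (values assumed for illustration): with an energy unit
 * of 2^-16 Joules (rapl_hw_unit[] == 16, as on SandyBridge), rapl_scale()
 * shifts the delta left by 32 - 16 = 16 bits, so one hardware increment
 * becomes 2^16 units of 2^-32 Joules, i.e. exactly 2^-16 Joules.
 */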

static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
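
/*
 * Wraparound example (illustrative values): with shift == 32, a counter
 * that wrapped from 0xffffffff to 0x00000001 yields
 * delta = ((0x1 << 32) - (0xffffffff << 32)) >> 32 = 2, so the 64-bit
 * subtraction of the shifted values hides the 32-bit overflow.
 */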

static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}

static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/*
	 * check event is known (determines counter)
	 */
	switch (cfg) {
	case INTEL_RAPL_PP0:
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PKG:
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case INTEL_RAPL_RAM:
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PP1:
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;
		break;
	default:
		return -EINVAL;
	}
	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
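
/*
 * Usage example (command line is illustrative): counting package energy
 * system-wide with "perf stat -a -e power/energy-pkg/ -- sleep 1" opens
 * this PMU with attr->config == 0x02 (INTEL_RAPL_PKG), which the switch
 * in rapl_pmu_event_init() maps to MSR_PKG_ENERGY_STATUS.
 */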

static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}

static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  , rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  , rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  , rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  , rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  , rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  , rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");

/*
 * we compute in 2^-32 Joule (~0.23 nJ) increments regardless of the MSR unit
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");

static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};

static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};

static struct attribute *rapl_events_knl_attr[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};

static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}

static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;

		raw_spin_lock_init(&pmu->lock);
		INIT_LIST_HEAD(&pmu->active_list);
		pmu->pmu = &rapl_pmus->pmu;
		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
		rapl_hrtimer_init(pmu);

		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
	}

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
	pmu->cpu = cpu;
	return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
	u64 msr_rapl_power_unit_bits;
	int i;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
		return -1;
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

	/*
	 * DRAM domain on HSW server and KNL has fixed energy unit which can be
	 * different than the unit from power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001 "
	 */
	if (apply_quirk)
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec
	 * Divide interval by 2 to avoid lockstep (2 * 100)
	 * if hw unit is 32, then we use 2 ms 1/200/2
	 */
	rapl_timer_ms = 2;
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
	}
	return 0;
}
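
/*
 * Worked example (numbers are illustrative): with rapl_hw_unit[0] == 16
 * (1/2^16 Joules per increment) the code above picks
 * rapl_timer_ms = 5 * 2^(32 - 16 - 1) = 163840 ms. At the 200 W reference
 * the 32-bit counter would wrap after 2^32 * 2^-16 / 200 ~= 328 s, so
 * polling roughly every half of that keeps the counter from overflowing
 * unnoticed.
 */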

static void __init rapl_advertise(void)
{
	int i;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
		}
	}
}

static void cleanup_rapl_pmus(void)
{
	int i;

	for (i = 0; i < rapl_pmus->maxpkg; i++)
		kfree(rapl_pmus->pmus[i]);
	kfree(rapl_pmus);
}

static int __init init_rapl_pmus(void)
{
	int maxpkg = topology_max_packages();
	size_t size;

	size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->maxpkg = maxpkg;
	rapl_pmus->pmu.attr_groups = rapl_attr_groups;
	rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
	rapl_pmus->pmu.event_init = rapl_pmu_event_init;
	rapl_pmus->pmu.add = rapl_pmu_event_add;
	rapl_pmus->pmu.del = rapl_pmu_event_del;
	rapl_pmus->pmu.start = rapl_pmu_event_start;
	rapl_pmus->pmu.stop = rapl_pmu_event_stop;
	rapl_pmus->pmu.read = rapl_pmu_event_read;
	rapl_pmus->pmu.module = THIS_MODULE;
	return 0;
}

#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
	bool apply_quirk;
	int cntr_mask;
	struct attribute **attrs;
};

static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_KNL,
	.attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs = rapl_events_skl_attr,
};

static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);

static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;
	bool apply_quirk;
	int ret;

	id = x86_match_cpu(rapl_cpu_match);
	if (!id)
		return -ENODEV;

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);