#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

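/*
 * Translate a PCI bus to the physical package (socket) id recorded in
 * the pci2phy map; returns -1 if the bus is unknown.
 */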
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

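/*
 * Look up the pci2phy map for a PCI segment, allocating a new entry when
 * none exists. The map lock is dropped around the allocation and the
 * lookup retried, since another caller may have raced in and inserted
 * the entry meanwhile.
 */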
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events that do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

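/*
 * Read the current counter value and fold the delta into event->count.
 * Shifting the raw values up to bit 63 and the delta back down truncates
 * to the real counter width, so a counter wrap still yields the right
 * delta; the xchg loop retries if the hrtimer updated prev_count
 * concurrently.
 */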
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use a hrtimer to periodically poll the counter
 * and avoid missing overflows.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

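/*
 * Allocate and initialize a box on the given NUMA node, with trailing
 * space for the type's shared extra registers.
 */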
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

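/*
 * Collect the events of a group into box->event_list, starting with the
 * leader; returns the new event count or -EINVAL when the box would
 * exceed its counters.
 */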
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

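/*
 * Assign hardware counters to the collected events: the fastpath keeps
 * every event on the counter it already occupies, and falls back to the
 * generic perf_assign_events() solver as soon as one event has to move.
 */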
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

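/*
 * pmu::start callback: program the event on its assigned counter, mark it
 * active and, when this is the first active event on the box, enable the
 * box and kick the polling hrtimer.
 */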
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

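/*
 * pmu::add callback: collect the new event into the box, rerun counter
 * assignment, stop and reprogram any events that moved to a different
 * counter, and start the new event unless PERF_EF_START is clear.
 */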
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * The event is not yet connected with its siblings, therefore
	 * we must first collect existing siblings, then add the new
	 * event before we can simulate the scheduling.
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (pkg < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type, so check the
	 * PCI slot and function to identify the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * For a performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

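/*
 * Hotplug 'dying' callback: every departing cpu drops one reference on
 * each box of its package; the cpu that drops the last reference tears
 * the box down.
 */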
static int uncore_cpu_dying(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
	return 0;
}

static int uncore_cpu_starting(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}

	return 0;
}

static int uncore_cpu_prepare(unsigned int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}

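/*
 * Move the boxes of one uncore type to a new event-collecting cpu within
 * the same package, migrating the perf context of each active box along.
 */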
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return 0;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
	return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

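/*
 * Model dispatch table: each supported CPU model points at an
 * intel_uncore_init_fun with the MSR and/or PCI uncore init routines;
 * x86_match_cpu() picks the entry at module load.
 */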
#define X86_UNCORE_MODEL_MATCH(model, init) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void (*cpu_init)(void);
	int (*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_packages = topology_max_packages();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (cret && pret)
		return -ENODEV;

	/*
	 * Install callbacks. The core will call them for each online cpu.
	 *
	 * The first online cpu of each package allocates and takes
	 * the refcounts for all other online cpus in that package.
	 * If msrs are not enabled, no allocation is required and
	 * uncore_cpu_prepare() is not called for each online cpu.
	 */
	if (!cret) {
		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
					"PERF_X86_UNCORE_PREP",
					uncore_cpu_prepare, NULL);
		if (ret)
			goto err;
	} else {
		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
					  "PERF_X86_UNCORE_PREP",
					  uncore_cpu_prepare, NULL);
	}

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
			  "AP_PERF_X86_UNCORE_STARTING",
			  uncore_cpu_starting, uncore_cpu_dying);

	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
			  "AP_PERF_X86_UNCORE_ONLINE",
			  uncore_event_cpu_online, uncore_event_cpu_offline);
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);