#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

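/*
 * Translate a PCI bus to the physical package (socket) id recorded in
 * pci2phy_map_head by the platform specific PCI init code.
 */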
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * Generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for a fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use a hrtimer to periodically poll the counter
 * and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * Validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * The event is not yet connected with its siblings, therefore
	 * we must first collect the existing siblings and then add the
	 * new event before we can simulate the scheduling.
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void __init __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	if (pmu) {
		pkg = topology_physical_package_id(cpu);
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box)
				uncore_box_exit(box);
		}
	}
}

static void __init uncore_exit_boxes(void *dummy)
{
	struct intel_uncore_type **types;

	for (types = uncore_msr_uncores; *types; types++)
		__uncore_exit_boxes(*types++, smp_processor_id());
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

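/*
 * Set up one intel_uncore_type: one struct intel_uncore_pmu per box, each
 * with a per-package array of box pointers, plus the "events" attribute
 * group built from the type's event descriptions.
 */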
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (WARN_ON_ONCE(pkg < 0))
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	/*
	 * For performance monitoring units with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	/*
	 * Knights Landing uses a common PCI device ID for multiple instances
	 * of an uncore PMU device type. There is only one entry per device
	 * type in the knl_uncore_pci_ids table in spite of multiple devices
	 * present for some device types. Hence the PCI device idx would be 0
	 * for all devices. So increment the pmu pointer to point to an unused
	 * array element.
	 */
	if (boot_cpu_data.x86_model == 87) {
		while (pmu->func_id >= 0)
			pmu++;
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	case 87: /* Knights Landing */
		ret = knl_uncore_pci_init();
		break;
	case 94: /* SkyLake */
		ret = skl_uncore_pci_init();
		break;
	default:
		return 0;
	}

	if (ret)
		return ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

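/*
 * Take a reference on every MSR box of the starting cpu's package and
 * initialize a box when its first reference arrives. During early init the
 * boot path accounts for all cpus of the package that are already online,
 * hence the ncpus weight in that case.
 */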
static void uncore_cpu_starting(int cpu, bool init)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
				uncore_box_init(box);
		}
	}
}

static int uncore_cpu_prepare(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}

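/*
 * Move the boxes of one uncore type from old_cpu to new_cpu. Active perf
 * events are migrated with perf_pmu_migrate_context() so that counting
 * continues on the new package-local cpu.
 */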
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_event_exit_cpu(int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}

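/*
 * CPU hotplug callback: UP_PREPARE allocates boxes for a new package,
 * STARTING initializes them and (falling through to DOWN_FAILED) makes the
 * cpu eligible to collect events, DYING drops the box references and
 * DOWN_PREPARE migrates events away from the outgoing cpu.
 */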
static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return notifier_from_errno(uncore_cpu_prepare(cpu));

	case CPU_STARTING:
		uncore_cpu_starting(cpu, false);
	case CPU_DOWN_FAILED:
		uncore_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;

	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call = uncore_cpu_notifier,
	/*
	 * To migrate uncore events, our notifier should be executed
	 * before the perf core's notifier.
	 */
	.priority = CPU_PRI_PERF + 1,
};

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		nhm_uncore_cpu_init();
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
	case 60: /* Haswell */
	case 69: /* Haswell */
	case 70: /* Haswell */
	case 61: /* Broadwell */
	case 71: /* Broadwell */
		snb_uncore_cpu_init();
		break;
	case 45: /* Sandy Bridge-EP */
		snbep_uncore_cpu_init();
		break;
	case 46: /* Nehalem-EX */
	case 47: /* Westmere-EX aka. Xeon E7 */
		nhmex_uncore_cpu_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ivbep_uncore_cpu_init();
		break;
	case 63: /* Haswell-EP */
		hswep_uncore_cpu_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		bdx_uncore_cpu_init();
		break;
	case 87: /* Knights Landing */
		knl_uncore_cpu_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id(), true);
}

/* Lazy to avoid allocation of a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);

static int __init uncore_cpumask_init(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		unsigned int pkg = topology_logical_package_id(cpu);
		int ret;

		if (test_and_set_bit(pkg, packages))
			continue;
		/*
		 * The first online cpu of each package takes the refcounts
		 * for all other online cpus in that package.
		 */
		ret = uncore_cpu_prepare(cpu);
		if (ret)
			return ret;
		uncore_event_init_cpu(cpu);
		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
	}
	__register_cpu_notifier(&uncore_cpu_nb);
	return 0;
}

static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	max_packages = topology_max_packages();

	ret = uncore_pci_init();
	if (ret)
		return ret;
	ret = uncore_cpu_init();
	if (ret)
		goto err;

	cpu_notifier_register_begin();
	ret = uncore_cpumask_init();
	if (ret)
		goto err;
	cpu_notifier_register_done();
	return 0;

err:
	/* Undo box->init_box() */
	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
	return ret;
}
device_initcall(intel_uncore_init);