#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

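/*
 * Translate a PCI bus to the physical package id of the socket it
 * belongs to, using the segment/bus entries of the pci2phy map.
 * Returns -1 if no mapping exists.
 */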
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

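/*
 * Look up the pci2phy map entry for a PCI segment, allocating and
 * initializing a new entry (all bus numbers mapped to -1) if none
 * exists.  Called with pci2phy_map_lock held; the lock is dropped
 * temporarily around the allocation.
 */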
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	return pmu->boxes[topology_logical_package_id(cpu)];
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

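/*
 * Point the hw_perf_event at the control and counter registers of the
 * counter index it was assigned, handling the fixed counter specially.
 */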
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

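/*
 * Read the current counter value and accumulate the sign-extended
 * delta since the last read into event->count.  The xchg/retry loop
 * guards against a concurrent update of prev_count, e.g. from the
 * hrtimer poll.
 */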
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use an hrtimer to periodically poll the
 * counters and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * Disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process.
	 */
	local_irq_save(flags);

	/*
	 * Handle boxes with an active event list as opposed to active
	 * counters.
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

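/*
 * Allocate and initialize a box for the given uncore type, including
 * the trailing array of shared extra registers.  The cpu and package
 * ids stay at -1 until the box is associated with a package.
 */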
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->pkgid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Use the uncore_pmu_event_init() pmu event_init callback as a
 * detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_uncore_event(struct perf_event *event)
{
	return event->pmu->event_init == uncore_pmu_event_init;
}

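/*
 * Collect @leader and, if @dogrp, its active siblings into
 * box->event_list.  Returns the resulting number of events, or -EINVAL
 * if the box would exceed its available counters.
 */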
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_uncore_event(leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_uncore_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

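/*
 * Assign counters to the first @n events in box->event_list.  Events
 * that can keep their previous counter are handled on the fast path;
 * the rest is scheduled via perf_assign_events().  On failure, or for
 * a dry run with @assign == NULL, all constraints are put back.
 */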
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

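/*
 * Add an event to the box: collect it into the event list, compute a
 * counter assignment, stop events that must move to a different
 * counter, then reprogram and (re)start everything that is not held in
 * the PERF_HES_ARCH stopped state.
 */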
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * The event is not yet connected with its siblings, therefore
	 * we must first collect the existing siblings and then add the
	 * new event before we can simulate the scheduling.
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

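/*
 * Set up a newly created uncore event: sanity check the attributes,
 * bind the event to the package's designated reader cpu and apply the
 * type specific hw_config() hook.
 */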
static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * Uncore PMUs measure at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int pkg;

	for (pkg = 0; pkg < max_packages; pkg++)
		kfree(pmu->boxes[pkg]);
	kfree(pmu->boxes);
}

static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void __init uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

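/*
 * Allocate the per-type pmu array and the per-package box pointer
 * arrays, set up the default (unconstrained) event constraint and
 * build the optional "events" attribute group from the type's event
 * descriptors.
 */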
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *attr_group;
	struct attribute **attrs;
	size_t size;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_packages * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			return -ENOMEM;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
				     sizeof(*attr_group), GFP_KERNEL);
		if (!attr_group)
			return -ENOMEM;

		attrs = (struct attribute **)(attr_group + 1);
		attr_group->name = "events";
		attr_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = attr_group;
	}

	type->pmu_group = &uncore_pmu_attr_group;
	return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	pkg = topology_phys_to_logical_pkg(phys_id);
	if (WARN_ON_ONCE(pkg < 0))
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
	/*
	 * For performance monitoring units with multiple boxes,
	 * each box has a different function id.
	 */
	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	/*
	 * Knights Landing uses a common PCI device ID for multiple instances
	 * of an uncore PMU device type. There is only one entry per device
	 * type in the knl_uncore_pci_ids table in spite of multiple devices
	 * being present for some device types. Hence the PCI device idx is 0
	 * for all devices, so increment the pmu pointer to point to an
	 * unused array element.
	 */
	if (boot_cpu_data.x86_model == 87) {
		while (pmu->func_id >= 0)
			pmu++;
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->pkgid = pkg;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[pkg] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[pkg] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	pkg = topology_phys_to_logical_pkg(phys_id);

	box = pci_get_drvdata(pdev);
	if (!box) {
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
				uncore_extra_pci_dev[pkg].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[pkg] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		ret = snbep_uncore_pci_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ret = ivbep_uncore_pci_init();
		break;
	case 63: /* Haswell-EP */
		ret = hswep_uncore_pci_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		ret = bdx_uncore_pci_init();
		break;
	case 42: /* Sandy Bridge */
		ret = snb_uncore_pci_init();
		break;
	case 58: /* Ivy Bridge */
		ret = ivb_uncore_pci_init();
		break;
	case 60: /* Haswell */
	case 69: /* Haswell Celeron */
		ret = hsw_uncore_pci_init();
		break;
	case 61: /* Broadwell */
		ret = bdw_uncore_pci_init();
		break;
	case 87: /* Knights Landing */
		ret = knl_uncore_pci_init();
		break;
	case 94: /* SkyLake */
		ret = skl_uncore_pci_init();
		break;
	default:
		return 0;
	}

	if (ret)
		return ret;

	size = max_packages * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

static void uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

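/*
 * First cpu of a package initializes the package's boxes.  On the init
 * path the refcount is raised by the number of online cpus in the
 * package, since the boot-time setup runs on only one cpu per package.
 */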
static void uncore_cpu_starting(int cpu, bool init)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg, ncpus = 1;

	if (init) {
		/*
		 * On init we get the number of online cpus in the package
		 * and set the refcount for all of them.
		 */
		ncpus = cpumask_weight(topology_core_cpumask(cpu));
	}

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[pkg];
			if (!box)
				continue;
			/* The first cpu on a package activates the box */
			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
				uncore_box_init(box);
		}
	}
}

static int uncore_cpu_prepare(int cpu)
{
	struct intel_uncore_type *type, **types = uncore_msr_uncores;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(cpu);
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[pkg])
				continue;
			/* First cpu of a package allocates the box */
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				return -ENOMEM;
			box->pmu = pmu;
			box->pkgid = pkg;
			pmu->boxes[pkg] = box;
		}
	}
	return 0;
}

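/*
 * Move the uncore context of one type from @old_cpu to @new_cpu within
 * the same package: update box->cpu and migrate the perf context so
 * active events keep counting on the new cpu.
 */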
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, pkg;

	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[pkg];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_event_exit_cpu(int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);
}

static void uncore_event_init_cpu(int cpu)
{
	int target;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(uncore_msr_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
}

static int uncore_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return notifier_from_errno(uncore_cpu_prepare(cpu));

	case CPU_STARTING:
		uncore_cpu_starting(cpu, false);
	case CPU_DOWN_FAILED:
		uncore_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;

	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
	.notifier_call = uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority = CPU_PRI_PERF + 1,
};

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		nhm_uncore_cpu_init();
		break;
	case 42: /* Sandy Bridge */
	case 58: /* Ivy Bridge */
	case 60: /* Haswell */
	case 69: /* Haswell */
	case 70: /* Haswell */
	case 61: /* Broadwell */
	case 71: /* Broadwell */
		snb_uncore_cpu_init();
		break;
	case 45: /* Sandy Bridge-EP */
		snbep_uncore_cpu_init();
		break;
	case 46: /* Nehalem-EX */
	case 47: /* Westmere-EX aka. Xeon E7 */
		nhmex_uncore_cpu_init();
		break;
	case 62: /* Ivy Bridge-EP */
		ivbep_uncore_cpu_init();
		break;
	case 63: /* Haswell-EP */
		hswep_uncore_cpu_init();
		break;
	case 79: /* BDX-EP */
	case 86: /* BDX-DE */
		bdx_uncore_cpu_init();
		break;
	case 87: /* Knights Landing */
		knl_uncore_cpu_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id(), true);
}

/* Lazy to avoid allocation of a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);

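/*
 * Pick one online cpu per package as the initial uncore event reader,
 * prepare and activate its boxes and register the hotplug notifier.
 */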
static int __init uncore_cpumask_init(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		unsigned int pkg = topology_logical_package_id(cpu);
		int ret;

		if (test_and_set_bit(pkg, packages))
			continue;
		/*
		 * The first online cpu of each package takes the refcounts
		 * for all other online cpus in that package.
		 */
		ret = uncore_cpu_prepare(cpu);
		if (ret)
			return ret;
		uncore_event_init_cpu(cpu);
		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
	}
	__register_cpu_notifier(&uncore_cpu_nb);
	return 0;
}

static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (cpu_has_hypervisor)
		return -ENODEV;

	max_packages = topology_max_packages();

	ret = uncore_pci_init();
	if (ret)
		return ret;
	ret = uncore_cpu_init();
	if (ret)
		goto err;

	cpu_notifier_register_begin();
	ret = uncore_cpumask_init();
	if (ret)
		goto err;
	cpu_notifier_register_done();
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_pci_exit();
	cpu_notifier_register_done();
	return ret;
}
device_initcall(intel_uncore_init);