/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

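/* rdpmc (ECX) index of the first NB / LLC counter; amd_uncore_add() derives
 * event_base_rdpmc from these */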
#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16

static HLIST_HEAD(uncore_unused_list);

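/*
 * One amd_uncore instance describes the shared counters of a single NB or
 * LLC domain; every CPU in that domain points at the same instance through
 * the per-cpu amd_uncore_nb / amd_uncore_llc pointers.
 */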
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
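	/*
	 * The counters are 48 bits wide; shifting both values up by
	 * COUNTER_SHIFT and the difference back down sign-extends the
	 * 48-bit delta, so a counter wraparound is still accounted
	 * correctly.
	 */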
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

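	/*
	 * The control and count MSRs for counter idx are interleaved:
	 * PERF_CTL at msr_base + 2 * idx, PERF_CTR right after it.
	 */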
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

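/*
 * The "cpumask" sysfs attribute exposes the single active CPU per NB/LLC
 * domain; tools such as perf read it to learn where uncore events must be
 * opened.
 */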
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

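/*
 * Both uncore PMUs are system-wide only: perf_invalid_context keeps perf
 * from ever attaching these events to a task context.
 */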
static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = NUM_COUNTERS_NB;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = NUM_COUNTERS_L2;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

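/*
 * Counters within one NB or LLC domain are shared by every CPU in that
 * domain.  If another online CPU already carries an amd_uncore with the
 * same id, reuse that instance (bumping its refcount) and park our own
 * copy on uncore_unused_list so the online callback can free it.
 */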
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

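/*
 * The NB id is the node id from CPUID leaf 0x8000001e (ECX[7:0]); the LLC
 * id is the cached cpu_llc_id.  CPUs that resolve to the same id end up
 * sharing one amd_uncore instance.
 */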
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

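/*
 * The last CPU of a domain to go offline drops the final reference and
 * frees the shared amd_uncore instance.
 */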
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("perf: AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "PERF_X86_AMD_UNCORE_PREP",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "AP_PERF_X86_AMD_UNCORE_STARTING",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);