/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus whatever the perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
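	/*
	 * Adjust the stored register by the delta between the requested
	 * value and the current logical value, so that the count already
	 * accumulated in a still-running perf event stays accounted for.
	 */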
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
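		/*
		 * Fold the event's final count back into the guest's
		 * counter register before tearing the event down.
		 */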
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}

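/**
 * kvm_pmu_valid_counter_mask - mask of counters the guest may use
 * @vcpu: The vcpu pointer
 *
 * PMCR_EL0.N holds the number of implemented event counters; the cycle
 * counter (ARMV8_PMU_CYCLE_IDX) is always implemented on top of those.
 */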
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

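/*
 * Compute the set of counters that are enabled, have their overflow
 * interrupt unmasked and have in fact overflowed; a non-zero result
 * means the overflow interrupt line should be asserted.
 */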
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	u64 reg;

	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	reg = kvm_pmu_overflow_status(vcpu);
	if (reg != 0)
		kvm_vcpu_kick(vcpu);
}

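/*
 * Recompute the overflow interrupt level and, when it changes, feed the
 * new level into the in-kernel vgic; with a userspace irqchip the level
 * is reported via kvm_pmu_update_run() instead.
 */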
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret;
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  pmu->irq_num, overflow);
		WARN_ON(ret);
	}
}

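/*
 * Return true when the PMU overflow level no longer matches what was
 * last reported to a userspace irqchip through
 * run->s.regs.device_irq_level, meaning an exit to userspace is needed.
 */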
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

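/*
 * pmc->idx is also the counter's position in the enclosing pmu->pmc[]
 * array, so stepping back by idx lands on pmc[0]; container_of() then
 * walks out through kvm_pmu and kvm_vcpu_arch to recover the vcpu.
 */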
static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/*
 * When a perf event overflows, call kvm_pmu_overflow_set() to set the
 * overflow status for the corresponding counter.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
		       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

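	/* PMCR_EL0.C: reset the cycle counter to zero */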
	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

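	/* PMCR_EL0.P: reset all event counters to zero */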
	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

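	/* PMCR_EL0.LC: the cycle counter only overflows at 64 bits */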
	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    select_idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/*
	 * The initial sample period (overflow count) of the event: the perf
	 * overflow handler fires once this many counts elapse, i.e. when the
	 * emulated counter would wrap within its current bitmask.
	 */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	/*
	 * We currently require an in-kernel VGIC to use the PMU emulation,
	 * because we do not support forwarding PMU overflow interrupts to
	 * userspace yet.
	 */
	if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
	    !kvm_arm_pmu_irq_initialized(vcpu))
		return -ENXIO;

	if (kvm_arm_pmu_v3_ready(vcpu))
		return -EBUSY;

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

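/* PPIs occupy interrupt IDs 16-31, between the SGIs and the shared SPIs */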
#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}