/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is the value of the counter register
	 * plus whatever the backing perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
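	/*
	 * Store only the delta against the perf event's running count, so
	 * that a subsequent read (shadow register plus the perf event
	 * total) yields exactly @val.
	 */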
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

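	/*
	 * Fold the perf event's current count back into the shadow
	 * register so the value survives the event's release.
	 */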
	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

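/**
 * kvm_pmu_valid_counter_mask - mask of counters implemented for the guest
 * @vcpu: The vcpu pointer
 *
 * PMCR_EL0.N holds the number of event counters, so the mask covers bits
 * [N-1:0] plus the cycle counter at bit 31 (e.g. N == 2 gives 0x80000003).
 */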
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

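	/* Counters only count while the global PMCR_EL0.E bit is set. */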
	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

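	/*
	 * A counter asserts the overflow interrupt only if it has
	 * overflowed (PMOVSSET), is enabled (PMCNTENSET), has its
	 * interrupt enabled (PMINTENSET) and the PMU as a whole is
	 * enabled (PMCR_EL0.E).
	 */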
	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	u64 reg;

	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	reg = kvm_pmu_overflow_status(vcpu);
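	/* Kick the vcpu so the pending overflow is picked up on the next entry. */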
	if (reg != 0)
		kvm_vcpu_kick(vcpu);
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

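	/*
	 * The overflow interrupt is modelled as a level: only tell the
	 * vgic when the state of the line actually changes.
	 */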
	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level != overflow) {
		pmu->irq_level = overflow;
		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
				    pmu->irq_num, overflow);
	}
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

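	/*
	 * pmc->idx is this counter's index within pmu->pmc[], so stepping
	 * back by idx elements lands on pmc[0]; chained container_of()
	 * calls then walk out to the enclosing kvm_vcpu.
	 */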
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_perf_overflow - overflow callback for a counter's perf event
 *
 * When the perf event overflows, call kvm_pmu_overflow_set to set the
 * counter's overflow status.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

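	/*
	 * Only event counters can count SW_INCR (the loop stops short of
	 * the cycle counter); an increment that wraps the 32-bit counter
	 * value raises the corresponding overflow bit.
	 */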
	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle the PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

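	/*
	 * PMCR_EL0 bits handled here: E enables/disables all counters
	 * globally, C resets the cycle counter, P resets the event
	 * counters, and LC widens the cycle counter to 64 bits.
	 */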
	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
				       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants a PMC to count an
 * event with the given hardware event number. Here we call the perf_event
 * API to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment events don't need to be backed by a perf event */
	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/*
	 * The initial sample period (overflow count) of the event: set it
	 * so the perf event overflows exactly when the virtual counter
	 * would wrap (e.g. counter == 0xFFFFFFF0 under a 32-bit bitmask
	 * gives a period of 0x10).
	 */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}
369}