/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
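/*
 * Walk every interrupt source in this ICS and re-attempt delivery of
 * any source still flagged for resend. The ICS lock is dropped around
 * each delivery because icp_rm_deliver_irq() takes it itself.
 */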
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

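/*
 * Post a real-mode "host action" message to another host core so that
 * it can kick the target VCPU on our behalf (SMP only; a no-op on UP).
 */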
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
					old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

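/*
 * Flag an external interrupt as pending on the target VCPU and make
 * sure it notices: set MER if the target is ourselves, send an IPI if
 * its core is loaded, otherwise redirect to a host core or defer the
 * kick via rm_action (which makes the hcall return H_TOO_HARD later).
 */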
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

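/* Drop the pending external interrupt and clear MER on this VCPU. */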
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

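/*
 * Attempt to atomically commit a new ICP state, recomputing the EE
 * output line. Returns false if the cmpxchg lost a race and the caller
 * must re-evaluate from a freshly loaded state.
 */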
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

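/*
 * If real-mode debugging is enabled or a deferred host action is
 * pending, return H_TOO_HARD so the hcall is redone in virtual mode.
 */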
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

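/*
 * Scan the ICP resend map and ask each flagged ICS to retry delivery
 * of its pending sources.
 */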
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

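/*
 * Try to make "irq" the pending interrupt of the ICP. On success the
 * previously pending XISR (if any) is returned in *reject so the
 * caller can re-deliver it; on failure need_resend is latched instead.
 */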
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we set the
		 * resend map bit and mark the ICS state as needing a resend.
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

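/*
 * Real-mode handler for H_XIRR: accept the highest priority pending
 * interrupt, returning the XIRR in GPR4 and raising CPPR to its
 * priority.
 */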
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

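/*
 * Real-mode handler for H_IPI: set the target ICP's MFRR, rejecting or
 * resending any interrupt that the new MFRR displaces.
 */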
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

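/*
 * Real-mode handler for H_CPPR: change the processor's current
 * priority, rejecting the pending interrupt if it is no longer allowed
 * by the new CPPR.
 */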
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

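/*
 * Real-mode handler for H_EOI: lower the CPPR as encoded in the XIRR,
 * then complete the EOI, re-delivering level interrupts that are still
 * asserted and re-targeting passed-through host interrupts to the
 * current core if their affinity no longer matches.
 */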
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}
 bail:
	return check_too_hard(xics, icp);
}

unsigned long eoi_rc;

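/*
 * EOI a passed-through interrupt: first at the OPAL/PHB level for MSIs,
 * then at the XICS presentation controller by writing back the XIRR.
 */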
static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	_stwcix(xics_phys + XICS_XIRR, xirr);
}

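/*
 * Retarget a host interrupt to the given server CPU via the real-mode
 * OPAL call.
 */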
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real mode. Handles vmalloc'ed addresses.
 *
 * ToDo: Make this work for any integral type
 */

static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSIx interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per CPU variable and it should be accessible by real-mode KVM.
 *
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

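/*
 * Deliver a passed-through host interrupt to the guest ICP in real mode,
 * account it in the host interrupt statistics and EOI it on the host
 * side. The return value tells the assembly caller whether any deferred
 * work requires an exit to the host (check_too_hard).
 */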
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 u32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/**
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

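/*
 * Called from the host IPI handler: run the action that real-mode KVM
 * posted for this core (currently only kicking a VCPU awake).
 */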
void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}