/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 *
 * - ioctl's to save/restore the entire state for snapshot & migration
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

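/*
 * Inject or clear an interrupt source from the host side (called from
 * kvm_vm_ioctl_xics_irq()): look up the source, update its "asserted"
 * state for level interrupts and, unless the interrupt is being cleared,
 * attempt delivery to whichever ICP the source is routed to.
 */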
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if (level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}

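/*
 * Walk all interrupt sources of this ICS and re-attempt delivery of any
 * that are flagged for resend. The ICS mutex is dropped around each
 * delivery attempt since icp_deliver_irq() takes it again itself.
 */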
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	mutex_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		mutex_unlock(&ics->lock);
		icp_deliver_irq(xics, icp, state->number);
		mutex_lock(&ics->lock);
	}

	mutex_unlock(&ics->lock);
}

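/*
 * Set the server and priority ("XIVE") of an interrupt source. If the
 * source had a pending or rejected interrupt and is no longer masked,
 * re-attempt delivery to the new server once the ICS lock is dropped.
 */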
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	bool deliver;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	mutex_lock(&ics->lock);

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	state->server = server;
	state->priority = priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	mutex_unlock(&ics->lock);

	if (deliver)
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

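/*
 * Read back the current server and priority of an interrupt source,
 * under the ICS lock so the pair is consistent.
 */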
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	mutex_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	mutex_unlock(&ics->lock);

	return 0;
}

/* -- ICP routines, including hcalls -- */

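/*
 * Compute the new "EE pending" output for a proposed ICP state, then try
 * to commit it with a single cmpxchg on the packed state word. On success,
 * if an external interrupt should now be raised, queue it on the target
 * vcpu and kick that vcpu when it is not the caller. Returns false if the
 * state changed under us and the caller must retry its transaction.
 */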
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

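/*
 * Scan the per-ICP resend bitmap and ask every flagged ICS to retry
 * delivery of its pending sources. Bits are cleared with
 * test_and_clear_bit() so concurrent setters are not lost.
 */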
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

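/*
 * Try to present an interrupt of the given priority to an ICP. Delivery
 * succeeds only if the priority is more favored than the ICP's CPPR, MFRR
 * and currently pending priority; otherwise need_resend is latched so a
 * later CPPR change triggers a retry. *reject returns any interrupt that
 * was displaced from the XISR and now needs re-delivery.
 */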
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	mutex_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * We don't differentiate normal deliveries and resends, so this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ICS mutex.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			mutex_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between the icp_try_to_deliver() atomic update and now,
		 * then we know it might have missed the resend_map bit. So
		 * we retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			mutex_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	mutex_unlock(&ics->lock);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (i.e. bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

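/*
 * H_IPI hcall: write the MFRR of the destination server's ICP. An MFRR
 * more favored than the CPPR becomes a pending IPI (possibly rejecting
 * the current XISR); making the MFRR less favored may release a latched
 * resend.
 */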
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it as in
	 *
	 * ICP state: Check_IPI
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri)
				reject = new_state.xisr;
			new_state.pending_pri = mfrr;
			new_state.xisr = XICS_IPI;
		}

		if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

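/*
 * H_CPPR hcall: update the calling vcpu's CPPR. Moving to a less favored
 * value goes through icp_down_cppr() and may trigger resends; moving to a
 * more favored value may reject the currently pending interrupt, which is
 * then re-delivered.
 */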
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (i.e. more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR, which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	return H_SUCCESS;
}

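/*
 * Complete an XICS hcall that the real-mode handler punted back to
 * virtual mode: perform whatever follow-up actions (vcpu kick, resend
 * check, re-delivery of a rejected interrupt) it recorded in rm_action.
 */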
static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU)
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	if (icp->rm_action & XICS_RM_CHECK_RESEND)
		icp_check_resend(xics, icp);
	if (icp->rm_action & XICS_RM_REJECT)
		icp_deliver_irq(xics, icp, icp->rm_reject);

	icp->rm_action = 0;

	return H_SUCCESS;
}

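/*
 * Top-level dispatcher for the XICS hcalls (H_XIRR, H_CPPR, H_EOI, H_IPI).
 * When real-mode handling is enabled, reaching this point means the
 * real-mode handler returned "too hard" and only the completion work
 * remains to be done here.
 */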
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* Check for real mode returning too hard */
	if (xics->real_mode)
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}


/* -- Initialisation code etc. -- */

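/*
 * debugfs dump of the full emulated XICS state: one line per ICP (vcpu)
 * followed by every interrupt source of every ICS.
 */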
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = ACCESS_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
	}

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		mutex_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		mutex_unlock(&ics->lock);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

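/*
 * Lazily create the ICS that covers a given interrupt number. Called the
 * first time a source in that block is used; kvm->lock serialises racing
 * creators, and losing the race simply returns the ICS the winner made.
 */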
struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					  struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	mutex_init(&ics->lock);
	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

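/*
 * Allocate and attach an ICP (presentation controller) to a vcpu for the
 * given server number. The MFRR and pending priority start out masked.
 */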
int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

/* -- ioctls -- */

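/*
 * Userspace interrupt injection: map a kvm_irq_level request (set,
 * set-level or unset) onto ics_deliver_irq() for the given source.
 */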
int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args)
{
	struct kvmppc_xics *xics;
	int r;

	/* locking against multiple callers? */

	xics = kvm->arch.xics;
	if (!xics)
		return -ENODEV;

	switch (args->level) {
	case KVM_INTERRUPT_SET:
	case KVM_INTERRUPT_SET_LEVEL:
	case KVM_INTERRUPT_UNSET:
		r = ics_deliver_irq(xics, args->irq, args->level);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

void kvmppc_xics_free(struct kvmppc_xics *xics)
{
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
}

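/*
 * Create the per-VM XICS device: allocate the kvmppc_xics structure,
 * publish it under kvm->lock, set up the debugfs entry and, when built
 * for HV KVM on POWER7 or later (CPU_FTR_ARCH_206), enable the real-mode
 * fast paths.
 */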
int kvm_xics_create(struct kvm *kvm, u32 type)
{
	struct kvmppc_xics *xics;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	xics->kvm = kvm;

	/* Already there ? */
	mutex_lock(&kvm->lock);
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;
	mutex_unlock(&kvm->lock);

	if (ret) {
		/* Lost the race against a concurrent creator, free our copy */
		kfree(xics);
		return ret;
	}

	xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_64_HV
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

	return 0;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}