/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"


/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2
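
/*
 * The gap is applied in xive_try_pick_queue() below: a queue with
 * (q->msk + 1) slots is only targeted by up to
 * (q->msk + 1) - XIVE_Q_GAP interrupts, keeping room for the IPI.
 */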

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page.
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	/* We use the existing H_PROD mechanism to wake up the target */
	vcpu->arch.prodded = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;
	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries.
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt at a given server/prio; this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive->vp_base + server,
					 prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking: we only handle accounting during (re)targeting,
 *   and it also lets an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts from remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
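
/*
 * Quick reference for the PQ handling described above:
 *
 *   mask:   set PQ to 10, save the previous P and Q
 *   unmask: set PQ to 11 if the saved Q was set; then, if the saved P
 *           was clear, do an effective EOI (which retriggers when Q is
 *           set), otherwise leave PQ alone and wait for the guest H_EOI
 */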

int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs; we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargeted. An attempt to do an int_on on an
	 *       untargeted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with a valid default
	 *       priority.
	 */

	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been set up yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}
	/* Free the VP */
	kfree(xc);
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= KVM_MAX_VCPUS) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive->vp_base + cpu;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that it gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently). If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	u32 i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, to mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							    int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargeted. It means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force both P and Q, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}

static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int i;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
			struct xive_q *q = &xc->queues[i];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[i])
				continue;

			seq_printf(m, " [q%d]: ", i);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[i]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[i], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};

void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}