/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */

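/*
 * XGLUE/GLUE paste the includer-provided X_PFX prefix onto each
 * function name below, so the same template body can be built once
 * per variant by each .c file that includes it.
 */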
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
#define XICS_DUMMY	1

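/*
 * Ack any interrupt pending in the TIMA OS ring, record its priority
 * in xc->pending and refresh our cached copy of the HW CPPR.
 */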
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/*
	 * DD1 bug workaround: If PIPR is less favored than CPPR
	 * ignore the interrupt or we might incorrectly lose an IPB
	 * bit.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;
		if (pipr >= xc->hw_cppr)
			return;
	}

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/* XXX Check grouping level */

	/* Anything ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}

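/*
 * ESB management load on an interrupt source. Returns the low byte of
 * the result (the PQ bits), applying the XIVE_IRQ_FLAG_SHIFT_BUG
 * offset workaround when the source is flagged for it.
 */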
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}

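/*
 * EOI an interrupt at the source level, using whichever mechanism the
 * source supports: store EOI, firmware (OPAL) EOI, or a PQ reset with
 * an explicit re-trigger when Q was found set.
 */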
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		opal_int_eoi(hw_irq);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

			/* Re-trigger if needed */
			if ((eoi_val & 1) && __x_trig_page(xd))
				__x_writeq(0, __x_trig_page(xd));
		}
	}
}

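/*
 * Scan modes for scan_interrupts(): scan_fetch consumes the interrupt
 * and updates the queue pointers, scan_poll only peeks, and scan_eoi
 * re-evaluates the pending bits after an EOI without fetching.
 */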
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want
		 */
		prio = ffs(pending) - 1;

		/*
		 * If the most favoured prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (ie, qpage = 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)
			goto skip_ipi;

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;

		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;
	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}

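/*
 * H_XIRR: ack the HW, scan the queues and return the most favored
 * pending interrupt (with the previous CPPR) in GPR4.
 */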
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	/*
	 * Cleanup the old-style bits if needed (they may have been
	 * set by a pull or an escalation interrupt).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 *
	 * if (masked) {
	 *	lock();
	 *	if (masked) {
	 *		old_Q = true;
	 *		hirq = 0;
	 *	}
	 *	unlock();
	 * }
	 */

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}

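/*
 * H_IPOLL: poll for a pending interrupt on the given server without
 * accepting it, returning it and the CPPR in GPR4.
 */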
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;
		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}

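/*
 * Compute the most favored priority still pending (including a pending
 * IPI signalled through xc->mfrr) and write it to the "set OS pending"
 * TIMA register so the HW re-presents an interrupt for it.
 */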
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

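/*
 * Walk the queues that the new CPPR now masks, replace any interrupt
 * that has been re-routed to another vCPU with XICS_DUMMY and EOI it
 * at the source so it gets presented again on its new target.
 */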
static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
{
	unsigned int prio;

	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;
		__be32 *qpage;
		u16 src;

		idx = q->idx;
		toggle = q->toggle;
		qpage = READ_ONCE(q->qpage);
		if (!qpage)
			continue;

		/* For each interrupt in the queue */
		for (;;) {
			entry = be32_to_cpup(qpage + idx);

			/* No more ? */
			if ((entry >> 31) == toggle)
				break;
			irq = entry & 0x7fffffff;

			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)
				goto next;
			sb = kvmppc_xive_find_source(xive, irq, &src);
			if (!sb)
				goto next;
			state = &sb->irq_state[src];

			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)
				goto next;

			/*
			 * Alright, it *has* been re-routed, kill it from
			 * the queue.
			 */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* If it's not an LSI, set PQ to 11 so the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			/* EOI the source */
			GLUE(X_PFX,source_eoi)(hw_num, xd);

	next:
			idx = (idx + 1) & q->msk;
			if (idx == 0)
				toggle ^= 1;
		}
	}
}

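/*
 * H_CPPR: update the guest CPPR. When masking less we re-push pending
 * bits to the HW; when masking more we scan the queues for interrupts
 * that were re-routed to other vCPUs.
 */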
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();

	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt, otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);
	} else {
		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * its CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
	}

	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}

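/*
 * H_EOI: set the new CPPR from the XIRR, EOI the interrupt at the
 * source (IPIs need no source EOI), then re-scan for anything left
 * pending and apply the new CPPR to the HW.
 */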
X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));

	}

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

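/*
 * H_IPI: update the target vCPU's MFRR and trigger its internal IPI
 * source if the new MFRR is more favored than its current CPPR.
 */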
X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - We synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   that updates xc->cppr and then reads xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update.
	 */
	mb();

	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}