/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */
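
/*
 * A rough sketch of the glue definitions the including .c file is
 * expected to provide before including this template (the exact
 * definitions live in the includers, e.g. book3s_xive.c and
 * book3s_hv_rm_xive.c, which may also define XIVE_RUNTIME_CHECKS):
 *
 *	#define X_PFX		xive_vm_
 *	#define X_STATIC	static
 *	#define X_STAT_PFX	stat_vm_
 *	#define __x_tima	xive_tima
 *	#define __x_readb	__raw_readb
 *	#define __x_writeb	__raw_writeb
 *	#define __x_readw	__raw_readw
 *	#define __x_readq	__raw_readq
 *	#define __x_writeq	__raw_writeq
 */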

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
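
/*
 * The double expansion above forces X_PFX to be macro-expanded before
 * token pasting, so GLUE(X_PFX,h_xirr) yields, e.g., xive_vm_h_xirr
 * rather than the literal X_PFXh_xirr.
 */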

static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
	u8 cppr;
	u16 ack;

	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */
	eieio();

	/*
	 * DD1 bug workaround: If PIPR is less favored than CPPR
	 * ignore the interrupt or we might incorrectly lose an IPB
	 * bit.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
		if (pipr >= xc->hw_cppr)
			return;
	}

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
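	/*
	 * The ACK cycle returns a 16-bit value: NSR in the high byte
	 * and the CPPR of the acknowledged interrupt in the low byte,
	 * which is how the two fields are picked apart below.
	 */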

	/* Synchronize subsequent queue accesses */
	mb();

	/* XXX Check grouping level */

	/* Anything ? */
	if (!((ack >> 8) & TM_QW1_NSR_EO))
		return;

	/* Grab CPPR of the most favored pending interrupt */
	cppr = ack & 0xff;
	if (cppr < 8)
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
	xc->hw_cppr = cppr;
}

static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
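	/*
	 * __x_readq is a raw (non-byte-swapped) load, and the ESB data
	 * is the low-order byte of the big-endian doubleword, so on a
	 * little-endian host it lands in bits 63:56 and must be
	 * shifted down.
	 */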
#ifdef __LITTLE_ENDIAN__
	val >>= 64-8;
#endif
	return (u8)val;
}

static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		opal_int_eoi(hw_irq);
	} else {
		uint64_t eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

			/* Re-trigger if needed */
			if ((eoi_val & 1) && __x_trig_page(xd))
				__x_writeq(0, __x_trig_page(xd));
		}
	}
}

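/*
 * Modes for scan_interrupts() below: scan_fetch consumes the most
 * favored interrupt (updating queue pointers and EOIing any IPI
 * signal on the way), scan_poll only peeks without changing queue
 * state, and scan_eoi re-evaluates the pending bits after an EOI
 * without adjusting the CPPR.
 */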
enum {
	scan_fetch,
	scan_poll,
	scan_eoi,
};

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
{
	u32 hirq = 0;
	u8 prio = 0xff;

	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		struct xive_q *q;
		u32 idx, toggle;
		__be32 *qpage;

		/*
		 * If pending is 0, ffs() returns 0 and prio wraps
		 * around to 0xff, which is what we want.
		 */
		prio = ffs(pending) - 1;

		/*
		 * If the most favored prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 *
		 * Note: If pending was 0 and mfrr is 0xff, we will
		 * not spuriously take an IPI because mfrr cannot
		 * then be smaller than cppr.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
			prio = xc->mfrr;
			hirq = XICS_IPI;
			break;
		}

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7)
			break;

		/* Grab queue and pointers */
		q = &xc->queues[prio];
		idx = q->idx;
		toggle = q->toggle;

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

skip_ipi:
		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (i.e., qpage == 0).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch)
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);
			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif
			if (hirq)
				goto skip_ipi;
		}

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {
			q->idx = idx;
			q->toggle = toggle;
		}

		/* Something found, stop searching */
		if (hirq)
			break;

		/* Clear the pending bit on the now empty queue */
		pending &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away.
		 */
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)
		return hirq;

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI that's it, no CPPR adjustment done here,
	 * all we needed was to clean up the stale pending bits and
	 * check if there's anything left.
	 */
	if (scan_type == scan_eoi)
		return hirq;

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;
	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
	}

	return hirq;
}

X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;
	u32 hirq;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	/*
	 * Clean up the old-style bits if needed (they may have been
	 * set by a pull or an escalation interrupt).
	 */
	if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
		clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
			  &vcpu->arch.pending_exceptions);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 *
	 *    if (masked) {
	 *        lock();
	 *        if (masked) {
	 *            old_Q = true;
	 *            hirq = 0;
	 *        }
	 *        unlock();
	 *    }
	 */

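	/*
	 * The returned value uses the XICS XIRR layout: the previous
	 * CPPR in the top byte, the interrupt source number in the
	 * low 24 bits.
	 */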
	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.gpr[4] = hirq | (old_cppr << 24);

	return H_SUCCESS;
}

X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;
	u32 hirq;
	u8 pipr;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		if (!vcpu)
			return H_PARAMETER;
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */
		pending = 0xff;
	} else {
		/* Grab pending interrupt if any */
		pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
		if (pipr < 8)
			pending |= 1 << pipr;
	}

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);

	return H_SUCCESS;
}

static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
	u8 pending, prio;

	pending = xc->pending;
	if (xc->mfrr != 0xff) {
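		/*
		 * Priorities above 7 have no pending bit of their own;
		 * fold them onto bit 7 (the least favored) so the IPI
		 * is still accounted for.
		 */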
		if (xc->mfrr < 8)
			pending |= 1 << xc->mfrr;
		else
			pending |= 0x80;
	}
	if (!pending)
		return;
	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 old_cppr;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	/* Map CPPR */
	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */
	old_cppr = xc->cppr;
	xc->cppr = cppr;

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */
	smp_mb();

	/*
	 * We are masking less: we need to look for pending things
	 * to deliver and set the VP pending bits accordingly to
	 * trigger a new interrupt, otherwise we might miss MFRR
	 * changes for which we have optimized out sending an IPI
	 * signal.
	 */
	if (cppr > old_cppr)
		GLUE(X_PFX,push_pending_to_hw)(xc);

	/* Apply new CPPR */
	xc->hw_cppr = cppr;
	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return H_SUCCESS;
}

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * the subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */
		smp_mb();
		goto bail;
	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		/* Same as above */
		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both the setting of in_eoi above vs.
	 * the subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */
	smp_mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - Synchronize with another CPU doing an H_EOI or a H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update
	 */
	mb();

	/* Shoot the IPI if more favored than the target cppr */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}