/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
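/*
 * Retry delivery of every source on this ICS that was flagged for
 * resend.  The ICS lock is dropped around each delivery attempt since
 * icp_rm_deliver_irq() takes it again itself.
 */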
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

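/*
 * Post an rm_host_ops action for the target vcpu to the first thread of
 * the chosen host core, then raise the muxed PPC_MSG_RM_HOST_ACTION IPI
 * so that thread runs the action in host (virtual mode) context.
 */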
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current core in the core map and go
 * around in a circle until we get back to it, looking for a core that
 * is running in host context and that hasn't already been targeted for
 * another rm_host_ops action.
 *
 * In the future, we could consider using a fairer algorithm (one that
 * distributes the IPIs better).
 *
 * Returns -1 if no suitable core could be found in the host.
 * Otherwise, returns the core number that has been reserved for use.
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
						old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded.
	 * If not, find an available host core and post it a message to wake
	 * the VCPU; if we can't find one, set up state so that we eventually
	 * return H_TOO_HARD.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

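/*
 * Atomically swap in a new ICP state with cmpxchg(), recomputing the
 * interrupt output (out_ee) bit from the new XISR/priority/CPPR values.
 * Returns false if another CPU changed the state first, in which case
 * the caller must re-read icp->state and retry.
 */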
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_CPPR (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

bail:
	return success;
}

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

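/*
 * Scan the per-ICP resend bitmap and have each ICS flagged there retry
 * delivery of its pending sources.
 */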
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

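/*
 * One delivery attempt: accept the interrupt into XISR/pending_pri if it
 * is more favored than the current CPPR, MFRR and pending priority.  Any
 * previously pending interrupt is returned in *reject so the caller can
 * re-deliver it; on failure, the ICP's need_resend flag is set instead.
 */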
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			icp->n_reject++;
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_rm_deliver_irq(xics, icp, irq);

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}
bail:
	return check_too_hard(xics, icp);
}

unsigned long eoi_rc;

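/*
 * EOI a passed-through MSI: complete the OPAL-level EOI first, then write
 * the XIRR back to the real-mode XICS presentation controller.  A non-zero
 * OPAL return code is stashed in eoi_rc for later inspection.
 */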
static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	_stwcix(xics_phys + XICS_XIRR, xirr);
}

static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real mode. Handles vmalloc'ed addresses.
 *
 * ToDo: Make this work for any integral type
 */

static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSI-X interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per-CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per-CPU variable and it should be accessible by real-mode KVM.
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

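/*
 * Real-mode handling of a passed-through interrupt: account it in the host
 * interrupt statistics, deliver the mapped guest irq through the ICP, then
 * EOI the hardware source.
 */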
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 u32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/* --- Non-real mode XICS-related built-in routines --- */

/**
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

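/*
 * Runs in the host's PPC_MSG_RM_HOST_ACTION IPI handler: perform the
 * action that real-mode KVM posted for this core, then clear rm_data
 * and rm_action (in that order) so the core can be grabbed again.
 */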
void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}