/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef _KVM_PPC_BOOK3S_XIVE_H
#define _KVM_PPC_BOOK3S_XIVE_H

#ifdef CONFIG_KVM_XICS
#include "book3s_xics.h"

/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through but it's easier to keep it around, as the same
 * guest interrupt can alternate between being emulated and passed
 * through if a physical device is hot-unplugged and replaced with
 * an emulated one.
 *
 * This state structure is very similar to the XICS one with
 * additional XIVE specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE Pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */

	/* Targeting as set by guest */
	u32 guest_server;		/* Current guest selected target */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* Level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;
};

/* Select the "right" interrupt (IPI vs. passthrough) */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}
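
/*
 * Usage sketch (illustrative only, not an additional API): a caller that
 * needs to act on the underlying HW interrupt, whichever of the IPI or
 * pass-through interrupt currently backs the source, would do:
 *
 *	u32 hw_num;
 *	struct xive_irq_data *xd;
 *
 *	kvmppc_xive_select_irq(state, &hw_num, &xd);
 *
 * after which hw_num and xd refer to the pass-through interrupt if one
 * is configured, and to the emulation IPI otherwise.
 */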

/*
 * This corresponds to an "ICS" in XICS terminology; we use it
 * as a means to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};

struct kvmppc_xive {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct dentry *dentry;

	/* VP block associated with the VM */
	u32 vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32 max_sbid;

	/*
	 * For state save, we lazily scan the queues on the first interrupt
	 * being migrated. We don't have a clean way to reset those flags,
	 * so we keep track of the number of valid sources and how many of
	 * them were migrated so we can reset when all of them have been
	 * processed (see the sketch below this structure).
	 */
	u32 src_count;
	u32 saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created;
	 * keep track here of how many of them there are.
	 */
	u32 delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8 qmap;

	/* Queue orders */
	u32 q_order;
	u32 q_page_order;

};
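
/*
 * Illustrative sketch (the real logic lives in the .c file) of how the
 * src_count/saved_src_count pair above is intended to work: the save path
 * bumps saved_src_count for every valid source it migrates, and once it
 * catches up with src_count the lazily-set scan state can be cleared for
 * the next migration pass, e.g.:
 *
 *	xive->saved_src_count++;
 *	if (xive->saved_src_count == xive->src_count)
 *		reset_scan_state(xive);
 *
 * where reset_scan_state() stands for a hypothetical helper that clears
 * the per-source scan flags and both counters.
 */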

#define KVMPPC_XIVE_Q_COUNT	8

struct kvmppc_xive_vcpu {
	struct kvmppc_xive *xive;
	struct kvm_vcpu *vcpu;
	bool valid;

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32 server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32 vp_id;
	u32 vp_chip_id;
	u32 vp_cam;

	/* IPI used for sending ... IPIs */
	u32 vp_ipi;
	struct xive_irq_data vp_ipi_data;

	/* Local emulation state */
	uint8_t cppr;		/* guest CPPR */
	uint8_t hw_cppr;	/* Hardware CPPR */
	uint8_t mfrr;
	uint8_t pending;

	/* Each VP has 8 queues though we only provision some */
	struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
	u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
	char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32 delayed_irq;

	/* Stats */
	u64 stat_rm_h_xirr;
	u64 stat_rm_h_ipoll;
	u64 stat_rm_h_cppr;
	u64 stat_rm_h_eoi;
	u64 stat_rm_h_ipi;
	u64 stat_vm_h_xirr;
	u64 stat_vm_h_ipoll;
	u64 stat_vm_h_cppr;
	u64 stat_vm_h_eoi;
	u64 stat_vm_h_ipi;
};

static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
			return vcpu;
	}
	return NULL;
}

static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
								    u32 irq, u16 *source)
{
	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
	u16 src = irq & KVMPPC_XICS_SRC_MASK;

	if (source)
		*source = src;
	if (bid > KVMPPC_XICS_MAX_ICS_ID)
		return NULL;
	return xive->src_blocks[bid];
}
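
/*
 * Lookup sketch (illustrative): a guest IRQ number is split into a source
 * block id (upper bits, via KVMPPC_XICS_ICS_SHIFT) and a per-block source
 * index (via KVMPPC_XICS_SRC_MASK), so the usual access pattern is:
 *
 *	u16 src;
 *	struct kvmppc_xive_src_block *sb;
 *	struct kvmppc_xive_irq_state *state;
 *
 *	sb = kvmppc_xive_find_source(xive, irq, &src);
 *	if (!sb)
 *		return -EINVAL;
 *	state = &sb->irq_state[src];
 *	if (!state->valid)
 *		return -EINVAL;
 *
 * with the exact error value being caller-specific.
 */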

/*
 * Mapping between guest priorities and host priorities
 * is as follows.
 *
 * Guest requests for 0...6 are honored. Guest requests for anything
 * higher result in a priority of 7 being applied.
 *
 * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
 * in order to match AIX expectations.
 *
 * A similar mapping is done for CPPR values.
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio == 0xff || prio < 8)
		return prio;
	return 7;
}

static inline u8 xive_prio_to_guest(u8 prio)
{
	if (prio == 0xff || prio < 7)
		return prio;
	return 0xb;
}
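
/*
 * Worked example of the mapping implemented above (for reference only):
 *
 *	from guest:	0x00-0x07 -> unchanged
 *			0x08-0xfe -> 0x07 (clamped)
 *			0xff      -> 0xff (masked)
 *
 *	to guest:	0x00-0x06 -> unchanged
 *			0x07      -> 0x0b (AIX convention)
 *			0xff      -> 0xff (masked)
 */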

static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);
	if ((cur >> 31) == *toggle)
		return 0;
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;
	return cur & 0x7fffffff;
}
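
/*
 * Usage sketch (illustrative, assuming struct xive_q carries the usual
 * qpage/msk/idx/toggle fields): scanning a queue for pending entries with
 * local copies of the index and toggle bit, so the live queue state is
 * left untouched:
 *
 *	u32 idx = q->idx;
 *	u32 toggle = q->toggle;
 *	u32 irq;
 *
 *	while ((irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle)) != 0)
 *		handle_pending(irq);
 *
 * where handle_pending() is a placeholder for whatever the scan does with
 * each entry (e.g. flagging the source for migration).
 */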

extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
			 unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			      unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */