// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	__u8 reserved[0x0700 - 0x0230];		/* 0x0230 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};
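
/*
 * Note: struct vsie_page must occupy exactly one page; see the
 * BUILD_BUG_ON() in kvm_s390_handle_vsie().
 */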

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
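	/* busy-wait until the VCPU executing this scb has left SIE */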
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/*
 * Create a shadow copy of the crycb block and set up key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We only accept format-1 (no AP in g2), but convert it into format-2.
 * There is nothing to do for format-0.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;

	scb_s->crycbd = 0;
	if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
		return 0;
	/* format-1 is supported with message-security-assist extension 3 */
	if (!test_kvm_facility(vcpu->kvm, 76))
		return 0;
	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		return 0;

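	/*
	 * The first 128 bytes of the crycb, which contain the wrapping
	 * keys at offsets 72-127, must not cross a page boundary.
	 */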
	if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
		return set_validity_icpt(scb_s, 0x003CU);
	else if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
			CRYCB_FORMAT2;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
	     vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
	return 0;
}

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
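		/* forward the interruption parameters (0xc0-0xef) to g2 */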
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		/* MVPG only */
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Set up the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We definitely have to flush the TLB if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		gpa = scb_o->scaol & ~0xfUL;
		if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
			gpa |= (u64) scb_o->scaoh << 32;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		gpa = scb_o->itdba & ~0xffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		gpa = scb_o->gvrd & ~0x1ffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		gpa = scb_o->riccbd & ~0x3fUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		gpa = scb_o->sdnxo;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
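		/* the SCA must not reside in the first two pages of guest memory */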
		if (!(gpa & ~0x1fffUL))
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (!(gpa & ~0x1fffU)) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes of vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		scb_s->riccbd = hpa;
	}
	if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || !(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/* Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

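	/* fac is a guest-real, doubleword-aligned address of the facility list */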
	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int rc;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
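	/* negative rc: -EFAULT for a gmap fault, -EINTR for a machine check */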

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0))
			break;
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read the guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
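	/* the scb address is 512-byte aligned, so addr >> 9 is a dense index */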
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
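		/* a refcount of 2 means only the cache held the page - it's now ours */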
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}