// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}
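
/*
 * Note (added for illustration, not from the original source): the
 * cmpxchg() in sca_inject_ext_call() enforces the "at most one pending
 * external call per VCPU" rule. Sketch of the protocol, assuming two
 * injectors race on the same target:
 *
 *   old_val.c = 0;                          expected: no call pending
 *   new_val   = { .c = 1, .scn = src_id };
 *   rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
 *
 * The loser reads back a value with c == 1 (rc != expect) and returns
 * -EBUSY, matching what the SIGP interpretation facility would report
 * for a second external call while one is already pending.
 */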

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}
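
/*
 * Worked example for the helpers above (illustrative only): for ISC 5,
 *   isc_to_isc_bits(5)   == (0x80 >> 5) << 24        == 0x04000000
 *   isc_to_int_word(5)   == (5 << 27) | 0x80000000   == 0xa8000000
 *   int_word_to_isc(0xa8000000) == 0x28000000 >> 27  == 5
 * i.e. the ISC travels in the bits under the 0x38000000 mask, while the
 * 0x80000000 bit is always set in the interruption word built here for
 * adapter interrupts.
 */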

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}
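
/*
 * Usage sketch (illustrative, assuming a valid gisa pointer): a pending
 * ISC in the interruption pending mask is consumed atomically, as done
 * later in __deliver_io():
 *
 *   if (kvm_s390_gisa_tac_ipm_gisc(gisa, isc))
 *           deliver an I/O interrupt built from isc_to_int_word(isc);
 *
 * The *_bit_inv() helpers use MSB-first bit numbering, which matches the
 * architected left-to-right numbering of the IPM byte.
 */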

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
		vcpu->arch.local_int.pending_irqs;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return pending_irqs_no_gisa(vcpu) |
	       kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	     (vcpu->kvm->arch.float_int.mchk.cr14 |
	      vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
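
/*
 * Example (illustrative only): with external interrupts enabled in the
 * PSW, I/O and machine check interrupts disabled, and only the emergency
 * signal subclass mask (CR0 bit 0x4000) set, a pending set containing the
 * IRQ_PEND_EXT_EMERGENCY, IRQ_PEND_IO_ISC_0 and IRQ_PEND_EXT_SERVICE bits
 * is reduced by deliverable_irqs() to just the IRQ_PEND_EXT_EMERGENCY bit.
 */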

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (vcpu->kvm->arch.gisa &&
	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
			io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
			io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}
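
/*
 * Illustrative note: when an adapter interrupt is pending only in the
 * GISA (no kvm_s390_interrupt_info queued on the ISC list), the fallback
 * path above synthesizes a minimal I/O interrupt where only the
 * interruption word is populated, e.g. for ISC 3:
 *
 *   io.io_int_word == isc_to_int_word(3) == 0x98000000
 *
 * with all other fields left zero.
 */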

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}
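
/*
 * Worked example (illustrative only, using the architected TOD resolution
 * of 4096 clock units per microsecond): with clock comparator interrupts
 * enabled and ckc - now == 8192 TOD units, tod_to_ns() yields 2000 ns of
 * sleep time; if CPU timer interrupts are also enabled and tod_to_ns(cputm)
 * is only 1000 ns, __calculate_sltime() returns the smaller value, 1000 ns.
 */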
1087
Carsten Otteba5c1e92008-03-25 18:47:26 +01001088int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
1089{
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001090 u64 sltime;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001091
1092 vcpu->stat.exit_wait_state++;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001093
David Hildenbrand0759d062014-05-13 16:54:32 +02001094 /* fast path */
David Hildenbrand118b8622015-09-23 12:25:15 +02001095 if (kvm_arch_vcpu_runnable(vcpu))
David Hildenbrand0759d062014-05-13 16:54:32 +02001096 return 0;
Carsten Ottee52b2af2008-05-21 13:37:44 +02001097
Carsten Otteba5c1e92008-03-25 18:47:26 +01001098 if (psw_interrupts_disabled(vcpu)) {
1099 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001100 return -EOPNOTSUPP; /* disabled wait */
Carsten Otteba5c1e92008-03-25 18:47:26 +01001101 }
1102
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001103 if (!ckc_interrupts_enabled(vcpu) &&
1104 !cpu_timer_interrupts_enabled(vcpu)) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001105 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
David Hildenbrandbda343e2014-12-12 12:26:40 +01001106 __set_cpu_idle(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001107 goto no_timer;
1108 }
1109
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001110 sltime = __calculate_sltime(vcpu);
1111 if (!sltime)
David Hildenbrandbda343e2014-12-12 12:26:40 +01001112 return 0;
1113
1114 __set_cpu_idle(vcpu);
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01001115 hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001116 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001117no_timer:
Thomas Huth800c1062013-09-12 10:33:45 +02001118 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
David Hildenbrand0759d062014-05-13 16:54:32 +02001119 kvm_vcpu_block(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001120 __unset_cpu_idle(vcpu);
Thomas Huth800c1062013-09-12 10:33:45 +02001121 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1122
David Hildenbrand2d00f752014-12-11 10:18:01 +01001123 hrtimer_cancel(&vcpu->arch.ckc_timer);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001124 return 0;
1125}
1126
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001127void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
1128{
Christian Borntraeger3491caf2016-05-13 12:16:35 +02001129 /*
1130 * We cannot move this into the if, as the CPU might be already
1131 * in kvm_vcpu_block without having the waitqueue set (polling)
1132 */
1133 vcpu->valid_wakeup = true;
Christian Borntraeger72e1ad42017-09-19 12:34:06 +02001134 /*
1135	 * This is mostly to document that the read in swait_active() could
1136	 * be moved before other stores, leading to subtle races.
1137	 * All current users do not store or use an atomic-like update.
1138 */
1139 smp_mb__after_atomic();
Marcelo Tosatti85773702016-02-19 09:46:39 +01001140 if (swait_active(&vcpu->wq)) {
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001141 /*
1142 * The vcpu gave up the cpu voluntarily, mark it as a good
1143 * yield-candidate.
1144 */
1145 vcpu->preempted = true;
Marcelo Tosatti85773702016-02-19 09:46:39 +01001146 swake_up(&vcpu->wq);
David Hildenbrandce2e4f02014-07-11 10:00:43 +02001147 vcpu->stat.halt_wakeup++;
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001148 }
David Hildenbrandadbf1692016-05-27 22:03:52 +02001149 /*
1150 * The VCPU might not be sleeping but is executing the VSIE. Let's
1151 * kick it, so it leaves the SIE to process the request.
1152 */
1153 kvm_s390_vsie_kick(vcpu);
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001154}
1155
Christian Borntraegerca872302009-05-12 17:21:49 +02001156enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1157{
1158 struct kvm_vcpu *vcpu;
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001159 u64 sltime;
Christian Borntraegerca872302009-05-12 17:21:49 +02001160
1161 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001162 sltime = __calculate_sltime(vcpu);
Christian Borntraegerca872302009-05-12 17:21:49 +02001163
David Hildenbrand2d00f752014-12-11 10:18:01 +01001164 /*
1165 * If the monotonic clock runs faster than the tod clock we might be
1166 * woken up too early and have to go back to sleep to avoid deadlocks.
1167 */
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001168 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
David Hildenbrand2d00f752014-12-11 10:18:01 +01001169 return HRTIMER_RESTART;
1170 kvm_s390_vcpu_wakeup(vcpu);
Christian Borntraegerca872302009-05-12 17:21:49 +02001171 return HRTIMER_NORESTART;
1172}
Carsten Otteba5c1e92008-03-25 18:47:26 +01001173
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001174void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1175{
1176 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001177
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001178 spin_lock(&li->lock);
Jens Freimann383d0b02014-07-29 15:11:49 +02001179 li->pending_irqs = 0;
1180 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1181 memset(&li->irq, 0, sizeof(li->irq));
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001182 spin_unlock(&li->lock);
David Hildenbrand49539192014-02-21 08:59:59 +01001183
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02001184 sca_clear_ext_call(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001185}
1186
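/*
 * Deliver all deliverable pending local interrupts to the guest, highest
 * priority first, and update the intercept indicators afterwards.
 */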
Christian Borntraeger614aeab2014-08-25 12:27:29 +02001187int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001188{
Christian Borntraeger180c12f2008-06-27 15:05:40 +02001189 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann79395032014-04-17 10:10:30 +02001190 int rc = 0;
Jens Freimann383d0b02014-07-29 15:11:49 +02001191 unsigned long irq_type;
Jens Freimann6d3da242013-07-03 15:18:35 +02001192 unsigned long irqs;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001193
1194 __reset_intercept_indicators(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001195
Jens Freimann383d0b02014-07-29 15:11:49 +02001196 /* pending ckc conditions might have been invalidated */
1197 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
David Hildenbrandb4aec922014-12-01 15:55:42 +01001198 if (ckc_irq_pending(vcpu))
Jens Freimann383d0b02014-07-29 15:11:49 +02001199 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1200
David Hildenbrandb4aec922014-12-01 15:55:42 +01001201 /* pending cpu timer conditions might have been invalidated */
1202 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1203 if (cpu_timer_irq_pending(vcpu))
1204 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1205
Jens Freimannffeca0a2015-04-17 10:21:04 +02001206 while ((irqs = deliverable_irqs(vcpu)) && !rc) {
Michael Muellerc7901a62017-06-29 18:39:27 +02001207 /* bits are in the reverse order of interrupt priority */
1208 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
David Hildenbrandbaabee62018-02-06 15:17:43 +01001209 switch (irq_type) {
1210 case IRQ_PEND_IO_ISC_0:
1211 case IRQ_PEND_IO_ISC_1:
1212 case IRQ_PEND_IO_ISC_2:
1213 case IRQ_PEND_IO_ISC_3:
1214 case IRQ_PEND_IO_ISC_4:
1215 case IRQ_PEND_IO_ISC_5:
1216 case IRQ_PEND_IO_ISC_6:
1217 case IRQ_PEND_IO_ISC_7:
Jens Freimann6d3da242013-07-03 15:18:35 +02001218 rc = __deliver_io(vcpu, irq_type);
David Hildenbrandbaabee62018-02-06 15:17:43 +01001219 break;
1220 case IRQ_PEND_MCHK_EX:
1221 case IRQ_PEND_MCHK_REP:
1222 rc = __deliver_machine_check(vcpu);
1223 break;
1224 case IRQ_PEND_PROG:
1225 rc = __deliver_prog(vcpu);
1226 break;
1227 case IRQ_PEND_EXT_EMERGENCY:
1228 rc = __deliver_emergency_signal(vcpu);
1229 break;
1230 case IRQ_PEND_EXT_EXTERNAL:
1231 rc = __deliver_external_call(vcpu);
1232 break;
1233 case IRQ_PEND_EXT_CLOCK_COMP:
1234 rc = __deliver_ckc(vcpu);
1235 break;
1236 case IRQ_PEND_EXT_CPU_TIMER:
1237 rc = __deliver_cpu_timer(vcpu);
1238 break;
1239 case IRQ_PEND_RESTART:
1240 rc = __deliver_restart(vcpu);
1241 break;
1242 case IRQ_PEND_SET_PREFIX:
1243 rc = __deliver_set_prefix(vcpu);
1244 break;
1245 case IRQ_PEND_PFAULT_INIT:
1246 rc = __deliver_pfault_init(vcpu);
1247 break;
1248 case IRQ_PEND_EXT_SERVICE:
1249 rc = __deliver_service(vcpu);
1250 break;
1251 case IRQ_PEND_PFAULT_DONE:
1252 rc = __deliver_pfault_done(vcpu);
1253 break;
1254 case IRQ_PEND_VIRTIO:
1255 rc = __deliver_virtio(vcpu);
1256 break;
1257 default:
1258 WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1259 clear_bit(irq_type, &li->pending_irqs);
Jens Freimann383d0b02014-07-29 15:11:49 +02001260 }
Jens Freimannffeca0a2015-04-17 10:21:04 +02001261 }
Jens Freimann383d0b02014-07-29 15:11:49 +02001262
Jens Freimann6d3da242013-07-03 15:18:35 +02001263 set_intercept_indicators(vcpu);
Jens Freimann79395032014-04-17 10:10:30 +02001264
1265 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001266}
1267
Jens Freimann383d0b02014-07-29 15:11:49 +02001268static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001269{
1270 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1271
David Hildenbranded2afcf2015-07-20 10:33:03 +02001272 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1273 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1274 irq->u.pgm.code, 0);
1275
David Hildenbrand634790b2015-11-04 16:33:33 +01001276 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1277 /* auto detection if no valid ILC was given */
1278 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1279 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1280 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1281 }
1282
David Hildenbrand238293b2015-05-04 12:38:48 +02001283 if (irq->u.pgm.code == PGM_PER) {
1284 li->irq.pgm.code |= PGM_PER;
David Hildenbrand634790b2015-11-04 16:33:33 +01001285 li->irq.pgm.flags = irq->u.pgm.flags;
David Hildenbrand238293b2015-05-04 12:38:48 +02001286 /* only modify PER related information */
1287 li->irq.pgm.per_address = irq->u.pgm.per_address;
1288 li->irq.pgm.per_code = irq->u.pgm.per_code;
1289 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1290 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1291 } else if (!(irq->u.pgm.code & PGM_PER)) {
1292 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1293 irq->u.pgm.code;
David Hildenbrand634790b2015-11-04 16:33:33 +01001294 li->irq.pgm.flags = irq->u.pgm.flags;
David Hildenbrand238293b2015-05-04 12:38:48 +02001295 /* only modify non-PER information */
1296 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1297 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1298 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1299 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1300 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1301 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1302 } else {
1303 li->irq.pgm = irq->u.pgm;
1304 }
Jens Freimann91851242014-12-01 16:43:40 +01001305 set_bit(IRQ_PEND_PROG, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001306 return 0;
1307}
1308
Jens Freimann383d0b02014-07-29 15:11:49 +02001309static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001310{
1311 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1312
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001313 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1314 irq->u.ext.ext_params2);
Jens Freimann383d0b02014-07-29 15:11:49 +02001315 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1316 irq->u.ext.ext_params,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001317 irq->u.ext.ext_params2);
Jens Freimann383d0b02014-07-29 15:11:49 +02001318
1319 li->irq.ext = irq->u.ext;
1320 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001321 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001322 return 0;
1323}
1324
Christian Borntraeger0675d922015-01-15 12:40:42 +01001325static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001326{
1327 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001328 struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001329 uint16_t src_id = irq->u.extcall.code;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001330
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001331 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
David Hildenbrandea5f4962014-10-14 15:29:30 +02001332 src_id);
Jens Freimann383d0b02014-07-29 15:11:49 +02001333 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001334 src_id, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001335
David Hildenbrandea5f4962014-10-14 15:29:30 +02001336 /* sending vcpu invalid */
David Hildenbrand152e9f62015-11-05 09:06:06 +01001337 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001338 return -EINVAL;
1339
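	/* with the SIGP interpretation facility, inject directly via the SCA */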
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001340 if (sclp.has_sigpif)
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02001341 return sca_inject_ext_call(vcpu, src_id);
David Hildenbrandea5f4962014-10-14 15:29:30 +02001342
David Hildenbrandb938eace2015-04-30 13:33:59 +02001343 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
David Hildenbrandea5f4962014-10-14 15:29:30 +02001344 return -EBUSY;
Jens Freimann383d0b02014-07-29 15:11:49 +02001345 *extcall = irq->u.extcall;
David Hildenbrand20182242018-01-23 18:05:28 +01001346 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001347 return 0;
1348}
1349
Jens Freimann383d0b02014-07-29 15:11:49 +02001350static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001351{
1352 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001353 struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001354
David Hildenbranded2afcf2015-07-20 10:33:03 +02001355 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
Jens Freimann556cc0d2014-12-18 15:52:21 +01001356 irq->u.prefix.address);
Jens Freimann383d0b02014-07-29 15:11:49 +02001357 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001358 irq->u.prefix.address, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001359
David Hildenbranda3a9c592014-10-14 09:44:55 +02001360 if (!is_vcpu_stopped(vcpu))
1361 return -EBUSY;
1362
Jens Freimann383d0b02014-07-29 15:11:49 +02001363 *prefix = irq->u.prefix;
1364 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001365 return 0;
1366}
1367
David Hildenbrand6cddd432014-10-15 16:48:53 +02001368#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
Jens Freimann383d0b02014-07-29 15:11:49 +02001369static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001370{
1371 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
David Hildenbrand28225452014-10-15 16:48:16 +02001372 struct kvm_s390_stop_info *stop = &li->irq.stop;
David Hildenbrand6cddd432014-10-15 16:48:53 +02001373 int rc = 0;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001374
David Hildenbranded2afcf2015-07-20 10:33:03 +02001375 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001376
David Hildenbrand28225452014-10-15 16:48:16 +02001377 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1378 return -EINVAL;
1379
David Hildenbrand6cddd432014-10-15 16:48:53 +02001380 if (is_vcpu_stopped(vcpu)) {
1381 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1382 rc = kvm_s390_store_status_unloaded(vcpu,
1383 KVM_S390_STORE_STATUS_NOADDR);
1384 return rc;
1385 }
1386
1387 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1388 return -EBUSY;
David Hildenbrand28225452014-10-15 16:48:16 +02001389 stop->flags = irq->u.stop.flags;
David Hildenbrand20182242018-01-23 18:05:28 +01001390 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001391 return 0;
1392}
1393
1394static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
Jens Freimann383d0b02014-07-29 15:11:49 +02001395 struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001396{
1397 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1398
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001399 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
David Hildenbranded2afcf2015-07-20 10:33:03 +02001400 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001401
1402 set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001403 return 0;
1404}
1405
1406static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
Jens Freimann383d0b02014-07-29 15:11:49 +02001407 struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001408{
1409 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1410
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001411 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
Jens Freimann383d0b02014-07-29 15:11:49 +02001412 irq->u.emerg.code);
1413 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001414 irq->u.emerg.code, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001415
David Hildenbrandb85de332015-11-05 09:38:15 +01001416 /* sending vcpu invalid */
1417 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1418 return -EINVAL;
1419
Jens Freimann49538d12014-12-18 15:48:14 +01001420 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
Jens Freimann383d0b02014-07-29 15:11:49 +02001421 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001422 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001423 return 0;
1424}
1425
Jens Freimann383d0b02014-07-29 15:11:49 +02001426static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001427{
1428 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001429 struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001430
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001431 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
Jens Freimann556cc0d2014-12-18 15:52:21 +01001432 irq->u.mchk.mcic);
Jens Freimann383d0b02014-07-29 15:11:49 +02001433 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001434 irq->u.mchk.mcic);
Jens Freimann383d0b02014-07-29 15:11:49 +02001435
1436 /*
Jens Freimannfc2020c2014-08-13 10:09:04 +02001437 * Because repressible machine checks can be indicated along with
1438 * exigent machine checks (PoP, Chapter 11, Interruption action)
1439 * we need to combine cr14, mcic and external damage code.
1440	 * The failing storage address and the logout area should not be OR'ed
1441	 * together; we just indicate the last occurrence of the corresponding
1442	 * machine check.
Jens Freimann383d0b02014-07-29 15:11:49 +02001443 */
Jens Freimannfc2020c2014-08-13 10:09:04 +02001444 mchk->cr14 |= irq->u.mchk.cr14;
Jens Freimann383d0b02014-07-29 15:11:49 +02001445 mchk->mcic |= irq->u.mchk.mcic;
Jens Freimannfc2020c2014-08-13 10:09:04 +02001446 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1447 mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1448 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1449 sizeof(mchk->fixed_logout));
Jens Freimann383d0b02014-07-29 15:11:49 +02001450 if (mchk->mcic & MCHK_EX_MASK)
1451 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1452 else if (mchk->mcic & MCHK_REP_MASK)
1453 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001454 return 0;
1455}
1456
Jens Freimann383d0b02014-07-29 15:11:49 +02001457static int __inject_ckc(struct kvm_vcpu *vcpu)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001458{
1459 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1460
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001461 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
Jens Freimann383d0b02014-07-29 15:11:49 +02001462 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001463 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001464
1465 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001466 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001467 return 0;
1468}
1469
Jens Freimann383d0b02014-07-29 15:11:49 +02001470static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001471{
1472 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1473
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001474 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
Jens Freimann383d0b02014-07-29 15:11:49 +02001475 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001476 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001477
1478 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001479 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimannbcd84682014-02-11 11:07:05 +01001480 return 0;
1481}
1482
Jens Freimann6d3da242013-07-03 15:18:35 +02001483static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1484 int isc, u32 schid)
1485{
1486 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1487 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1488 struct kvm_s390_interrupt_info *iter;
1489 u16 id = (schid & 0xffff0000U) >> 16;
1490 u16 nr = schid & 0x0000ffffU;
Jens Freimann383d0b02014-07-29 15:11:49 +02001491
Jens Freimann6d3da242013-07-03 15:18:35 +02001492 spin_lock(&fi->lock);
1493 list_for_each_entry(iter, isc_list, list) {
1494 if (schid && (id != iter->io.subchannel_id ||
1495 nr != iter->io.subchannel_nr))
1496 continue;
1497 /* found an appropriate entry */
1498 list_del_init(&iter->list);
1499 fi->counters[FIRQ_CNTR_IO] -= 1;
1500 if (list_empty(isc_list))
Michael Muelleree739f42017-07-03 15:32:50 +02001501 clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
Jens Freimann6d3da242013-07-03 15:18:35 +02001502 spin_unlock(&fi->lock);
1503 return iter;
1504 }
1505 spin_unlock(&fi->lock);
1506 return NULL;
1507}
1508
Michael Mueller4b35f652017-07-07 15:27:31 +02001509static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1510 u64 isc_mask, u32 schid)
Jens Freimann6d3da242013-07-03 15:18:35 +02001511{
1512 struct kvm_s390_interrupt_info *inti = NULL;
1513 int isc;
1514
1515 for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1516 if (isc_mask & isc_to_isc_bits(isc))
1517 inti = get_io_int(kvm, isc, schid);
1518 }
1519 return inti;
1520}
1521
Michael Mueller4b35f652017-07-07 15:27:31 +02001522static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1523{
1524 unsigned long active_mask;
1525 int isc;
1526
1527 if (schid)
1528 goto out;
1529 if (!kvm->arch.gisa)
1530 goto out;
1531
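	/*
	 * Align the 8-bit IPM with the ISC bits of isc_mask (bits 24-31) and
	 * shift the result to the top of the word, so that ISC 0 ends up in
	 * the MSB; __fls() ^ (BITS_PER_LONG - 1) then converts the bit number
	 * back to the MSB-first ISC numbering.
	 */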
1532 active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
1533 while (active_mask) {
1534 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1535 if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
1536 return isc;
1537 clear_bit_inv(isc, &active_mask);
1538 }
1539out:
1540 return -EINVAL;
1541}
1542
1543/*
1544 * Dequeue and return an I/O interrupt matching any of the interruption
1545 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1546 * Take into account the interrupts pending in the interrupt list and in GISA.
1547 *
1548 * Note that for a guest that does not enable I/O interrupts
1549 * but relies on TPI, a flood of classic interrupts may starve
1550 * out adapter interrupts on the same isc. Linux does not do
1551 * that, and it is possible to work around the issue by configuring
1552 * different iscs for classic and adapter interrupts in the guest,
1553 * but we may want to revisit this in the future.
1554 */
1555struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1556 u64 isc_mask, u32 schid)
1557{
1558 struct kvm_s390_interrupt_info *inti, *tmp_inti;
1559 int isc;
1560
1561 inti = get_top_io_int(kvm, isc_mask, schid);
1562
1563 isc = get_top_gisa_isc(kvm, isc_mask, schid);
1564 if (isc < 0)
1565 /* no AI in GISA */
1566 goto out;
1567
1568 if (!inti)
1569 /* AI in GISA but no classical IO int */
1570 goto gisa_out;
1571
1572 /* both types of interrupts present */
1573 if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1574 /* classical IO int with higher priority */
1575 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1576 goto out;
1577 }
1578gisa_out:
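	/* synthesize an inti for the adapter interrupt pending in the GISA;
	 * requeue any classic interrupt that was dequeued above */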
1579 tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1580 if (tmp_inti) {
1581 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1582 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1583 if (inti)
1584 kvm_s390_reinject_io_int(kvm, inti);
1585 inti = tmp_inti;
1586 } else
1587 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1588out:
1589 return inti;
1590}
1591
Jens Freimann6d3da242013-07-03 15:18:35 +02001592#define SCCB_MASK 0xFFFFFFF8
1593#define SCCB_EVENT_PENDING 0x3
1594
1595static int __inject_service(struct kvm *kvm,
1596 struct kvm_s390_interrupt_info *inti)
1597{
1598 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1599
1600 spin_lock(&fi->lock);
1601 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1602 /*
1603 * Early versions of the QEMU s390 bios will inject several
1604	 * service interrupts one after another without handling the
1605	 * condition code indicating busy.
1606	 * We silently ignore those superfluous SCCB values.
1607	 * A future version of QEMU will take care of serializing
1608	 * servc requests.
1609 */
1610 if (fi->srv_signal.ext_params & SCCB_MASK)
1611 goto out;
1612 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1613 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1614out:
1615 spin_unlock(&fi->lock);
1616 kfree(inti);
1617 return 0;
1618}
1619
1620static int __inject_virtio(struct kvm *kvm,
1621 struct kvm_s390_interrupt_info *inti)
1622{
1623 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1624
1625 spin_lock(&fi->lock);
1626 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1627 spin_unlock(&fi->lock);
1628 return -EBUSY;
1629 }
1630 fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1631 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1632 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1633 spin_unlock(&fi->lock);
1634 return 0;
1635}
1636
1637static int __inject_pfault_done(struct kvm *kvm,
1638 struct kvm_s390_interrupt_info *inti)
1639{
1640 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1641
1642 spin_lock(&fi->lock);
1643 if (fi->counters[FIRQ_CNTR_PFAULT] >=
1644 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1645 spin_unlock(&fi->lock);
1646 return -EBUSY;
1647 }
1648 fi->counters[FIRQ_CNTR_PFAULT] += 1;
1649 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1650 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1651 spin_unlock(&fi->lock);
1652 return 0;
1653}
1654
1655#define CR_PENDING_SUBCLASS 28
1656static int __inject_float_mchk(struct kvm *kvm,
1657 struct kvm_s390_interrupt_info *inti)
1658{
1659 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1660
1661 spin_lock(&fi->lock);
1662 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1663 fi->mchk.mcic |= inti->mchk.mcic;
1664 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1665 spin_unlock(&fi->lock);
1666 kfree(inti);
1667 return 0;
1668}
1669
1670static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001671{
1672 struct kvm_s390_float_interrupt *fi;
Jens Freimann6d3da242013-07-03 15:18:35 +02001673 struct list_head *list;
1674 int isc;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001675
Michael Muellerd7c5cb02017-06-12 14:15:19 +02001676 isc = int_word_to_isc(inti->io.io_int_word);
1677
1678 if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
1679 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1680 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1681 kfree(inti);
1682 return 0;
1683 }
1684
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001685 fi = &kvm->arch.float_int;
1686 spin_lock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001687 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1688 spin_unlock(&fi->lock);
1689 return -EBUSY;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001690 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001691 fi->counters[FIRQ_CNTR_IO] += 1;
1692
Christian Borntraegerdcc98ea2016-06-07 09:37:17 +02001693 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1694 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1695 else
1696 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1697 inti->io.subchannel_id >> 8,
1698 inti->io.subchannel_id >> 1 & 0x3,
1699 inti->io.subchannel_nr);
Jens Freimann6d3da242013-07-03 15:18:35 +02001700 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1701 list_add_tail(&inti->list, list);
Michael Muelleree739f42017-07-03 15:32:50 +02001702 set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001703 spin_unlock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001704 return 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001705}
1706
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001707/*
1708 * Find a destination VCPU for a floating irq and kick it.
1709 */
1710static void __floating_irq_kick(struct kvm *kvm, u64 type)
1711{
1712 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001713 struct kvm_vcpu *dst_vcpu;
1714 int sigcpu, online_vcpus, nr_tries = 0;
1715
1716 online_vcpus = atomic_read(&kvm->online_vcpus);
1717 if (!online_vcpus)
1718 return;
1719
1720 /* find idle VCPUs first, then round robin */
1721 sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1722 if (sigcpu == online_vcpus) {
1723 do {
1724 sigcpu = fi->next_rr_cpu;
1725 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1726 /* avoid endless loops if all vcpus are stopped */
1727 if (nr_tries++ >= online_vcpus)
1728 return;
1729 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1730 }
1731 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1732
1733 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001734 switch (type) {
1735 case KVM_S390_MCHK:
David Hildenbrand20182242018-01-23 18:05:28 +01001736 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001737 break;
1738 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Christian Borntraegera9810322018-01-29 12:22:45 +01001739 if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
1740 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001741 break;
1742 default:
David Hildenbrand20182242018-01-23 18:05:28 +01001743 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001744 break;
1745 }
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001746 kvm_s390_vcpu_wakeup(dst_vcpu);
1747}
1748
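/* queue a floating interrupt of the given type and kick a suitable VCPU */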
Jens Freimanna91b8eb2014-01-30 08:40:23 +01001749static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001750{
Jens Freimann6d3da242013-07-03 15:18:35 +02001751 u64 type = READ_ONCE(inti->type);
1752 int rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001753
Jens Freimann6d3da242013-07-03 15:18:35 +02001754 switch (type) {
1755 case KVM_S390_MCHK:
1756 rc = __inject_float_mchk(kvm, inti);
1757 break;
1758 case KVM_S390_INT_VIRTIO:
1759 rc = __inject_virtio(kvm, inti);
1760 break;
1761 case KVM_S390_INT_SERVICE:
1762 rc = __inject_service(kvm, inti);
1763 break;
1764 case KVM_S390_INT_PFAULT_DONE:
1765 rc = __inject_pfault_done(kvm, inti);
1766 break;
1767 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1768 rc = __inject_io(kvm, inti);
1769 break;
1770 default:
1771 rc = -EINVAL;
Cornelia Huckd8346b72012-12-20 15:32:08 +01001772 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001773 if (rc)
1774 return rc;
1775
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001776 __floating_irq_kick(kvm, type);
Jens Freimann6d3da242013-07-03 15:18:35 +02001777 return 0;
Jens Freimannc05c4182013-10-07 16:13:45 +02001778}
1779
1780int kvm_s390_inject_vm(struct kvm *kvm,
1781 struct kvm_s390_interrupt *s390int)
1782{
1783 struct kvm_s390_interrupt_info *inti;
David Hildenbrand428d53b2015-01-16 12:58:09 +01001784 int rc;
Jens Freimannc05c4182013-10-07 16:13:45 +02001785
1786 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1787 if (!inti)
1788 return -ENOMEM;
1789
1790 inti->type = s390int->type;
1791 switch (inti->type) {
1792 case KVM_S390_INT_VIRTIO:
1793 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1794 s390int->parm, s390int->parm64);
1795 inti->ext.ext_params = s390int->parm;
1796 inti->ext.ext_params2 = s390int->parm64;
1797 break;
1798 case KVM_S390_INT_SERVICE:
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001799 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
Jens Freimannc05c4182013-10-07 16:13:45 +02001800 inti->ext.ext_params = s390int->parm;
1801 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001802 case KVM_S390_INT_PFAULT_DONE:
Dominik Dingel3c038e62013-10-07 17:11:48 +02001803 inti->ext.ext_params2 = s390int->parm64;
1804 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02001805 case KVM_S390_MCHK:
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001806 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
Jens Freimannc05c4182013-10-07 16:13:45 +02001807 s390int->parm64);
1808 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1809 inti->mchk.mcic = s390int->parm64;
1810 break;
1811 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Jens Freimannc05c4182013-10-07 16:13:45 +02001812 inti->io.subchannel_id = s390int->parm >> 16;
1813 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1814 inti->io.io_int_parm = s390int->parm64 >> 32;
1815 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1816 break;
1817 default:
1818 kfree(inti);
1819 return -EINVAL;
1820 }
1821 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1822 2);
1823
David Hildenbrand428d53b2015-01-16 12:58:09 +01001824 rc = __inject_vm(kvm, inti);
1825 if (rc)
1826 kfree(inti);
1827 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001828}
1829
David Hildenbrand15462e32015-02-04 15:59:11 +01001830int kvm_s390_reinject_io_int(struct kvm *kvm,
Cornelia Huck2f32d4e2014-01-08 18:07:54 +01001831 struct kvm_s390_interrupt_info *inti)
1832{
David Hildenbrand15462e32015-02-04 15:59:11 +01001833 return __inject_vm(kvm, inti);
Cornelia Huck2f32d4e2014-01-08 18:07:54 +01001834}
1835
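/* translate the legacy kvm_s390_interrupt format into a kvm_s390_irq */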
Jens Freimann383d0b02014-07-29 15:11:49 +02001836int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1837 struct kvm_s390_irq *irq)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001838{
Jens Freimann383d0b02014-07-29 15:11:49 +02001839 irq->type = s390int->type;
1840 switch (irq->type) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001841 case KVM_S390_PROGRAM_INT:
Jens Freimann0146a7b2014-07-28 15:37:58 +02001842 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001843 return -EINVAL;
1844 irq->u.pgm.code = s390int->parm;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001845 break;
Christian Borntraegerb7e6e4d2009-01-22 10:29:08 +01001846 case KVM_S390_SIGP_SET_PREFIX:
Jens Freimann383d0b02014-07-29 15:11:49 +02001847 irq->u.prefix.address = s390int->parm;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001848 break;
David Hildenbrand28225452014-10-15 16:48:16 +02001849 case KVM_S390_SIGP_STOP:
1850 irq->u.stop.flags = s390int->parm;
1851 break;
Jason J. Herne82a12732012-10-02 16:25:36 +02001852 case KVM_S390_INT_EXTERNAL_CALL:
Jens Freimann94d1f562015-01-15 14:40:34 +01001853 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001854 return -EINVAL;
1855 irq->u.extcall.code = s390int->parm;
Jason J. Herne82a12732012-10-02 16:25:36 +02001856 break;
1857 case KVM_S390_INT_EMERGENCY:
Jens Freimann94d1f562015-01-15 14:40:34 +01001858 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001859 return -EINVAL;
1860 irq->u.emerg.code = s390int->parm;
Jason J. Herne82a12732012-10-02 16:25:36 +02001861 break;
Cornelia Huck48a3e952012-12-20 15:32:09 +01001862 case KVM_S390_MCHK:
Jens Freimann383d0b02014-07-29 15:11:49 +02001863 irq->u.mchk.mcic = s390int->parm64;
1864 break;
1865 }
1866 return 0;
1867}
1868
David Hildenbrand6cddd432014-10-15 16:48:53 +02001869int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1870{
1871 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1872
1873 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1874}
1875
1876void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1877{
1878 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1879
1880 spin_lock(&li->lock);
1881 li->irq.stop.flags = 0;
1882 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1883 spin_unlock(&li->lock);
1884}
1885
Jens Freimann79e87a12015-03-19 15:12:12 +01001886static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann383d0b02014-07-29 15:11:49 +02001887{
Jens Freimann383d0b02014-07-29 15:11:49 +02001888 int rc;
1889
Jens Freimann383d0b02014-07-29 15:11:49 +02001890 switch (irq->type) {
1891 case KVM_S390_PROGRAM_INT:
Jens Freimann383d0b02014-07-29 15:11:49 +02001892 rc = __inject_prog(vcpu, irq);
1893 break;
1894 case KVM_S390_SIGP_SET_PREFIX:
1895 rc = __inject_set_prefix(vcpu, irq);
1896 break;
1897 case KVM_S390_SIGP_STOP:
1898 rc = __inject_sigp_stop(vcpu, irq);
1899 break;
1900 case KVM_S390_RESTART:
1901 rc = __inject_sigp_restart(vcpu, irq);
1902 break;
1903 case KVM_S390_INT_CLOCK_COMP:
1904 rc = __inject_ckc(vcpu);
1905 break;
1906 case KVM_S390_INT_CPU_TIMER:
1907 rc = __inject_cpu_timer(vcpu);
1908 break;
1909 case KVM_S390_INT_EXTERNAL_CALL:
1910 rc = __inject_extcall(vcpu, irq);
1911 break;
1912 case KVM_S390_INT_EMERGENCY:
1913 rc = __inject_sigp_emergency(vcpu, irq);
1914 break;
1915 case KVM_S390_MCHK:
1916 rc = __inject_mchk(vcpu, irq);
Cornelia Huck48a3e952012-12-20 15:32:09 +01001917 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001918 case KVM_S390_INT_PFAULT_INIT:
Jens Freimann383d0b02014-07-29 15:11:49 +02001919 rc = __inject_pfault_init(vcpu, irq);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001920 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001921 case KVM_S390_INT_VIRTIO:
1922 case KVM_S390_INT_SERVICE:
Cornelia Huckd8346b72012-12-20 15:32:08 +01001923 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Carsten Otteba5c1e92008-03-25 18:47:26 +01001924 default:
Jens Freimann0146a7b2014-07-28 15:37:58 +02001925 rc = -EINVAL;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001926 }
Jens Freimann79e87a12015-03-19 15:12:12 +01001927
1928 return rc;
1929}
1930
1931int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1932{
1933 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1934 int rc;
1935
1936 spin_lock(&li->lock);
1937 rc = do_inject_vcpu(vcpu, irq);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001938 spin_unlock(&li->lock);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001939 if (!rc)
1940 kvm_s390_vcpu_wakeup(vcpu);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001941 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001942}
Jens Freimannc05c4182013-10-07 16:13:45 +02001943
Jens Freimann6d3da242013-07-03 15:18:35 +02001944static inline void clear_irq_list(struct list_head *_list)
Jens Freimannc05c4182013-10-07 16:13:45 +02001945{
Jens Freimann6d3da242013-07-03 15:18:35 +02001946 struct kvm_s390_interrupt_info *inti, *n;
Jens Freimannc05c4182013-10-07 16:13:45 +02001947
Jens Freimann6d3da242013-07-03 15:18:35 +02001948 list_for_each_entry_safe(inti, n, _list, list) {
Jens Freimannc05c4182013-10-07 16:13:45 +02001949 list_del(&inti->list);
1950 kfree(inti);
1951 }
Jens Freimannc05c4182013-10-07 16:13:45 +02001952}
1953
Jens Freimann94aa0332015-03-16 12:17:13 +01001954static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1955 struct kvm_s390_irq *irq)
Jens Freimannc05c4182013-10-07 16:13:45 +02001956{
Jens Freimann94aa0332015-03-16 12:17:13 +01001957 irq->type = inti->type;
Jens Freimannc05c4182013-10-07 16:13:45 +02001958 switch (inti->type) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02001959 case KVM_S390_INT_PFAULT_INIT:
1960 case KVM_S390_INT_PFAULT_DONE:
Jens Freimannc05c4182013-10-07 16:13:45 +02001961 case KVM_S390_INT_VIRTIO:
Jens Freimann94aa0332015-03-16 12:17:13 +01001962 irq->u.ext = inti->ext;
Jens Freimannc05c4182013-10-07 16:13:45 +02001963 break;
1964 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Jens Freimann94aa0332015-03-16 12:17:13 +01001965 irq->u.io = inti->io;
Jens Freimannc05c4182013-10-07 16:13:45 +02001966 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02001967 }
Jens Freimannc05c4182013-10-07 16:13:45 +02001968}
1969
Jens Freimann6d3da242013-07-03 15:18:35 +02001970void kvm_s390_clear_float_irqs(struct kvm *kvm)
1971{
1972 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1973 int i;
1974
1975 spin_lock(&fi->lock);
Jens Freimannf2ae45e2015-06-22 13:20:12 +02001976 fi->pending_irqs = 0;
1977 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1978 memset(&fi->mchk, 0, sizeof(fi->mchk));
Jens Freimann6d3da242013-07-03 15:18:35 +02001979 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1980 clear_irq_list(&fi->lists[i]);
1981 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1982 fi->counters[i] = 0;
1983 spin_unlock(&fi->lock);
Michael Mueller24160af2017-06-14 13:21:32 +02001984 kvm_s390_gisa_clear(kvm);
Jens Freimann6d3da242013-07-03 15:18:35 +02001985};
1986
Jens Freimann94aa0332015-03-16 12:17:13 +01001987static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
Jens Freimannc05c4182013-10-07 16:13:45 +02001988{
1989 struct kvm_s390_interrupt_info *inti;
1990 struct kvm_s390_float_interrupt *fi;
Jens Freimann94aa0332015-03-16 12:17:13 +01001991 struct kvm_s390_irq *buf;
Jens Freimann6d3da242013-07-03 15:18:35 +02001992 struct kvm_s390_irq *irq;
Jens Freimann94aa0332015-03-16 12:17:13 +01001993 int max_irqs;
Jens Freimannc05c4182013-10-07 16:13:45 +02001994 int ret = 0;
1995 int n = 0;
Jens Freimann6d3da242013-07-03 15:18:35 +02001996 int i;
Jens Freimannc05c4182013-10-07 16:13:45 +02001997
Jens Freimann94aa0332015-03-16 12:17:13 +01001998 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1999 return -EINVAL;
2000
2001 /*
2002 * We are already using -ENOMEM to signal
2003	 * userspace that it may retry with a bigger buffer,
2004	 * so we need to use something else for this case.
2005 */
2006 buf = vzalloc(len);
2007 if (!buf)
2008 return -ENOBUFS;
2009
2010 max_irqs = len / sizeof(struct kvm_s390_irq);
2011
Michael Mueller24160af2017-06-14 13:21:32 +02002012 if (kvm->arch.gisa &&
2013 kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
2014 for (i = 0; i <= MAX_ISC; i++) {
2015 if (n == max_irqs) {
2016 /* signal userspace to try again */
2017 ret = -ENOMEM;
2018 goto out_nolock;
2019 }
2020 if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
2021 irq = (struct kvm_s390_irq *) &buf[n];
2022 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2023 irq->u.io.io_int_word = isc_to_int_word(i);
2024 n++;
2025 }
2026 }
2027 }
Jens Freimannc05c4182013-10-07 16:13:45 +02002028 fi = &kvm->arch.float_int;
2029 spin_lock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002030 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2031 list_for_each_entry(inti, &fi->lists[i], list) {
2032 if (n == max_irqs) {
2033 /* signal userspace to try again */
2034 ret = -ENOMEM;
2035 goto out;
2036 }
2037 inti_to_irq(inti, &buf[n]);
2038 n++;
2039 }
2040 }
2041 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
Jens Freimann94aa0332015-03-16 12:17:13 +01002042 if (n == max_irqs) {
Jens Freimannc05c4182013-10-07 16:13:45 +02002043 /* signal userspace to try again */
2044 ret = -ENOMEM;
Jens Freimann6d3da242013-07-03 15:18:35 +02002045 goto out;
Jens Freimannc05c4182013-10-07 16:13:45 +02002046 }
Jens Freimann6d3da242013-07-03 15:18:35 +02002047 irq = (struct kvm_s390_irq *) &buf[n];
2048 irq->type = KVM_S390_INT_SERVICE;
2049 irq->u.ext = fi->srv_signal;
Jens Freimannc05c4182013-10-07 16:13:45 +02002050 n++;
2051 }
Jens Freimann6d3da242013-07-03 15:18:35 +02002052 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2053 if (n == max_irqs) {
2054 /* signal userspace to try again */
2055 ret = -ENOMEM;
2056 goto out;
2057 }
2058 irq = (struct kvm_s390_irq *) &buf[n];
2059 irq->type = KVM_S390_MCHK;
2060 irq->u.mchk = fi->mchk;
2061 n++;
2062	}
2063
2064out:
Jens Freimannc05c4182013-10-07 16:13:45 +02002065 spin_unlock(&fi->lock);
Michael Mueller24160af2017-06-14 13:21:32 +02002066out_nolock:
Jens Freimann94aa0332015-03-16 12:17:13 +01002067 if (!ret && n > 0) {
2068 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2069 ret = -EFAULT;
2070 }
2071 vfree(buf);
Jens Freimannc05c4182013-10-07 16:13:45 +02002072
2073 return ret < 0 ? ret : n;
2074}
2075
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002076static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2077{
2078 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2079 struct kvm_s390_ais_all ais;
2080
2081 if (attr->attr < sizeof(ais))
2082 return -EINVAL;
2083
2084 if (!test_kvm_facility(kvm, 72))
2085 return -ENOTSUPP;
2086
2087 mutex_lock(&fi->ais_lock);
2088 ais.simm = fi->simm;
2089 ais.nimm = fi->nimm;
2090 mutex_unlock(&fi->ais_lock);
2091
2092 if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2093 return -EFAULT;
2094
2095 return 0;
2096}
2097
Jens Freimannc05c4182013-10-07 16:13:45 +02002098static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2099{
2100 int r;
2101
2102 switch (attr->group) {
2103 case KVM_DEV_FLIC_GET_ALL_IRQS:
Jens Freimann94aa0332015-03-16 12:17:13 +01002104 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
Jens Freimannc05c4182013-10-07 16:13:45 +02002105 attr->attr);
2106 break;
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002107 case KVM_DEV_FLIC_AISM_ALL:
2108 r = flic_ais_mode_get_all(dev->kvm, attr);
2109 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02002110 default:
2111 r = -EINVAL;
2112 }
2113
2114 return r;
2115}
2116
2117static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2118 u64 addr)
2119{
2120 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2121 void *target = NULL;
2122 void __user *source;
2123 u64 size;
2124
2125 if (get_user(inti->type, (u64 __user *)addr))
2126 return -EFAULT;
2127
2128 switch (inti->type) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002129 case KVM_S390_INT_PFAULT_INIT:
2130 case KVM_S390_INT_PFAULT_DONE:
Jens Freimannc05c4182013-10-07 16:13:45 +02002131 case KVM_S390_INT_VIRTIO:
2132 case KVM_S390_INT_SERVICE:
2133 target = (void *) &inti->ext;
2134 source = &uptr->u.ext;
2135 size = sizeof(inti->ext);
2136 break;
2137 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2138 target = (void *) &inti->io;
2139 source = &uptr->u.io;
2140 size = sizeof(inti->io);
2141 break;
2142 case KVM_S390_MCHK:
2143 target = (void *) &inti->mchk;
2144 source = &uptr->u.mchk;
2145 size = sizeof(inti->mchk);
2146 break;
2147 default:
2148 return -EINVAL;
2149 }
2150
2151 if (copy_from_user(target, source, size))
2152 return -EFAULT;
2153
2154 return 0;
2155}
2156
2157static int enqueue_floating_irq(struct kvm_device *dev,
2158 struct kvm_device_attr *attr)
2159{
2160 struct kvm_s390_interrupt_info *inti = NULL;
2161 int r = 0;
2162 int len = attr->attr;
2163
2164 if (len % sizeof(struct kvm_s390_irq) != 0)
2165 return -EINVAL;
2166 else if (len > KVM_S390_FLIC_MAX_BUFFER)
2167 return -EINVAL;
2168
2169 while (len >= sizeof(struct kvm_s390_irq)) {
2170 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2171 if (!inti)
2172 return -ENOMEM;
2173
2174 r = copy_irq_from_user(inti, attr->addr);
2175 if (r) {
2176 kfree(inti);
2177 return r;
2178 }
Jens Freimanna91b8eb2014-01-30 08:40:23 +01002179 r = __inject_vm(dev->kvm, inti);
2180 if (r) {
2181 kfree(inti);
2182 return r;
2183 }
Jens Freimannc05c4182013-10-07 16:13:45 +02002184 len -= sizeof(struct kvm_s390_irq);
2185 attr->addr += sizeof(struct kvm_s390_irq);
2186 }
2187
2188 return r;
2189}
2190
Cornelia Huck841b91c2013-07-15 13:36:01 +02002191static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2192{
2193 if (id >= MAX_S390_IO_ADAPTERS)
2194 return NULL;
2195 return kvm->arch.adapters[id];
2196}
2197
2198static int register_io_adapter(struct kvm_device *dev,
2199 struct kvm_device_attr *attr)
2200{
2201 struct s390_io_adapter *adapter;
2202 struct kvm_s390_io_adapter adapter_info;
2203
2204 if (copy_from_user(&adapter_info,
2205 (void __user *)attr->addr, sizeof(adapter_info)))
2206 return -EFAULT;
2207
2208 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
2209 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
2210 return -EINVAL;
2211
2212 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2213 if (!adapter)
2214 return -ENOMEM;
2215
2216 INIT_LIST_HEAD(&adapter->maps);
2217 init_rwsem(&adapter->maps_lock);
2218 atomic_set(&adapter->nr_maps, 0);
2219 adapter->id = adapter_info.id;
2220 adapter->isc = adapter_info.isc;
2221 adapter->maskable = adapter_info.maskable;
2222 adapter->masked = false;
2223 adapter->swap = adapter_info.swap;
Fei Li08fab502017-01-19 17:02:26 +01002224 adapter->suppressible = (adapter_info.flags) &
2225 KVM_S390_ADAPTER_SUPPRESSIBLE;
Cornelia Huck841b91c2013-07-15 13:36:01 +02002226 dev->kvm->arch.adapters[adapter->id] = adapter;
2227
2228 return 0;
2229}
2230
2231int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2232{
2233 int ret;
2234 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2235
2236 if (!adapter || !adapter->maskable)
2237 return -EINVAL;
2238 ret = adapter->masked;
2239 adapter->masked = masked;
2240 return ret;
2241}
2242
2243static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2244{
2245 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2246 struct s390_map_info *map;
2247 int ret;
2248
2249 if (!adapter || !addr)
2250 return -EINVAL;
2251
2252 map = kzalloc(sizeof(*map), GFP_KERNEL);
2253 if (!map) {
2254 ret = -ENOMEM;
2255 goto out;
2256 }
2257 INIT_LIST_HEAD(&map->list);
2258 map->guest_addr = addr;
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +02002259 map->addr = gmap_translate(kvm->arch.gmap, addr);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002260 if (map->addr == -EFAULT) {
2261 ret = -EFAULT;
2262 goto out;
2263 }
2264 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2265 if (ret < 0)
2266 goto out;
2267 BUG_ON(ret != 1);
2268 down_write(&adapter->maps_lock);
2269 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2270 list_add_tail(&map->list, &adapter->maps);
2271 ret = 0;
2272 } else {
2273 put_page(map->page);
2274 ret = -EINVAL;
2275 }
2276 up_write(&adapter->maps_lock);
2277out:
2278 if (ret)
2279 kfree(map);
2280 return ret;
2281}
2282
2283static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2284{
2285 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2286 struct s390_map_info *map, *tmp;
2287 int found = 0;
2288
2289 if (!adapter || !addr)
2290 return -EINVAL;
2291
2292 down_write(&adapter->maps_lock);
2293 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2294 if (map->guest_addr == addr) {
2295 found = 1;
2296 atomic_dec(&adapter->nr_maps);
2297 list_del(&map->list);
2298 put_page(map->page);
2299 kfree(map);
2300 break;
2301 }
2302 }
2303 up_write(&adapter->maps_lock);
2304
2305 return found ? 0 : -EINVAL;
2306}
2307
2308void kvm_s390_destroy_adapters(struct kvm *kvm)
2309{
2310 int i;
2311 struct s390_map_info *map, *tmp;
2312
2313 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2314 if (!kvm->arch.adapters[i])
2315 continue;
2316 list_for_each_entry_safe(map, tmp,
2317 &kvm->arch.adapters[i]->maps, list) {
2318 list_del(&map->list);
2319 put_page(map->page);
2320 kfree(map);
2321 }
2322 kfree(kvm->arch.adapters[i]);
2323 }
2324}
2325
2326static int modify_io_adapter(struct kvm_device *dev,
2327 struct kvm_device_attr *attr)
2328{
2329 struct kvm_s390_io_adapter_req req;
2330 struct s390_io_adapter *adapter;
2331 int ret;
2332
2333 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2334 return -EFAULT;
2335
2336 adapter = get_io_adapter(dev->kvm, req.id);
2337 if (!adapter)
2338 return -EINVAL;
2339 switch (req.type) {
2340 case KVM_S390_IO_ADAPTER_MASK:
2341 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2342 if (ret > 0)
2343 ret = 0;
2344 break;
2345 case KVM_S390_IO_ADAPTER_MAP:
2346 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2347 break;
2348 case KVM_S390_IO_ADAPTER_UNMAP:
2349 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2350 break;
2351 default:
2352 ret = -EINVAL;
2353 }
2354
2355 return ret;
2356}
2357
Halil Pasic6d28f782016-01-25 19:10:40 +01002358static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2359
2360{
2361 const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2362 u32 schid;
2363
2364 if (attr->flags)
2365 return -EINVAL;
2366 if (attr->attr != sizeof(schid))
2367 return -EINVAL;
2368 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2369 return -EFAULT;
Michael Mueller4dd6f172017-07-06 14:22:20 +02002370 if (!schid)
2371 return -EINVAL;
Halil Pasic6d28f782016-01-25 19:10:40 +01002372 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2373 /*
2374 * If userspace is conforming to the architecture, we can have at most
2375 * one pending I/O interrupt per subchannel, so this is effectively a
2376 * clear all.
2377 */
2378 return 0;
2379}
2380
Fei Li51978392017-02-17 17:06:26 +08002381static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2382{
2383 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2384 struct kvm_s390_ais_req req;
2385 int ret = 0;
2386
Christian Borntraeger1ba15b22017-05-31 10:18:55 +02002387 if (!test_kvm_facility(kvm, 72))
Fei Li51978392017-02-17 17:06:26 +08002388 return -ENOTSUPP;
2389
2390 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2391 return -EFAULT;
2392
2393 if (req.isc > MAX_ISC)
2394 return -EINVAL;
2395
2396 trace_kvm_s390_modify_ais_mode(req.isc,
2397 (fi->simm & AIS_MODE_MASK(req.isc)) ?
2398 (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2399 2 : KVM_S390_AIS_MODE_SINGLE :
2400 KVM_S390_AIS_MODE_ALL, req.mode);
2401
2402 mutex_lock(&fi->ais_lock);
2403 switch (req.mode) {
2404 case KVM_S390_AIS_MODE_ALL:
2405 fi->simm &= ~AIS_MODE_MASK(req.isc);
2406 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2407 break;
2408 case KVM_S390_AIS_MODE_SINGLE:
2409 fi->simm |= AIS_MODE_MASK(req.isc);
2410 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2411 break;
2412 default:
2413 ret = -EINVAL;
2414 }
2415 mutex_unlock(&fi->ais_lock);
2416
2417 return ret;
2418}
2419
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002420static int kvm_s390_inject_airq(struct kvm *kvm,
2421 struct s390_io_adapter *adapter)
2422{
2423 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2424 struct kvm_s390_interrupt s390int = {
2425 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2426 .parm = 0,
Michael Mueller2496c8e2017-08-31 11:10:28 +02002427 .parm64 = isc_to_int_word(adapter->isc),
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002428 };
2429 int ret = 0;
2430
Christian Borntraeger1ba15b22017-05-31 10:18:55 +02002431 if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002432 return kvm_s390_inject_vm(kvm, &s390int);
2433
2434 mutex_lock(&fi->ais_lock);
2435 if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2436 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2437 goto out;
2438 }
2439
2440 ret = kvm_s390_inject_vm(kvm, &s390int);
2441 if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2442 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2443 trace_kvm_s390_modify_ais_mode(adapter->isc,
2444 KVM_S390_AIS_MODE_SINGLE, 2);
2445 }
2446out:
2447 mutex_unlock(&fi->ais_lock);
2448 return ret;
2449}
2450
2451static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2452{
2453 unsigned int id = attr->attr;
2454 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2455
2456 if (!adapter)
2457 return -EINVAL;
2458
2459 return kvm_s390_inject_airq(kvm, adapter);
2460}
2461
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002462static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2463{
2464 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2465 struct kvm_s390_ais_all ais;
2466
2467 if (!test_kvm_facility(kvm, 72))
2468 return -ENOTSUPP;
2469
2470 if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2471 return -EFAULT;
2472
2473 mutex_lock(&fi->ais_lock);
2474 fi->simm = ais.simm;
2475 fi->nimm = ais.nimm;
2476 mutex_unlock(&fi->ais_lock);
2477
2478 return 0;
2479}
2480
Jens Freimannc05c4182013-10-07 16:13:45 +02002481static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2482{
2483 int r = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002484 unsigned int i;
2485 struct kvm_vcpu *vcpu;
Jens Freimannc05c4182013-10-07 16:13:45 +02002486
2487 switch (attr->group) {
2488 case KVM_DEV_FLIC_ENQUEUE:
2489 r = enqueue_floating_irq(dev, attr);
2490 break;
2491 case KVM_DEV_FLIC_CLEAR_IRQS:
Christian Borntraeger67335e62014-03-25 17:09:08 +01002492 kvm_s390_clear_float_irqs(dev->kvm);
Jens Freimannc05c4182013-10-07 16:13:45 +02002493 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002494 case KVM_DEV_FLIC_APF_ENABLE:
2495 dev->kvm->arch.gmap->pfault_enabled = 1;
2496 break;
2497 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2498 dev->kvm->arch.gmap->pfault_enabled = 0;
2499 /*
2500 * Make sure no async faults are in transition when
2501 * clearing the queues. So we don't need to worry
2502 * about late coming workers.
2503 */
2504 synchronize_srcu(&dev->kvm->srcu);
2505 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2506 kvm_clear_async_pf_completion_queue(vcpu);
2507 break;
Cornelia Huck841b91c2013-07-15 13:36:01 +02002508 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2509 r = register_io_adapter(dev, attr);
2510 break;
2511 case KVM_DEV_FLIC_ADAPTER_MODIFY:
2512 r = modify_io_adapter(dev, attr);
2513 break;
Halil Pasic6d28f782016-01-25 19:10:40 +01002514 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2515 r = clear_io_irq(dev->kvm, attr);
2516 break;
Fei Li51978392017-02-17 17:06:26 +08002517 case KVM_DEV_FLIC_AISM:
2518 r = modify_ais_mode(dev->kvm, attr);
2519 break;
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002520 case KVM_DEV_FLIC_AIRQ_INJECT:
2521 r = flic_inject_airq(dev->kvm, attr);
2522 break;
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002523 case KVM_DEV_FLIC_AISM_ALL:
2524 r = flic_ais_mode_set_all(dev->kvm, attr);
2525 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02002526 default:
2527 r = -EINVAL;
2528 }
2529
2530 return r;
2531}
2532
Halil Pasic4f129852016-02-25 12:44:17 +01002533static int flic_has_attr(struct kvm_device *dev,
2534 struct kvm_device_attr *attr)
2535{
2536 switch (attr->group) {
2537 case KVM_DEV_FLIC_GET_ALL_IRQS:
2538 case KVM_DEV_FLIC_ENQUEUE:
2539 case KVM_DEV_FLIC_CLEAR_IRQS:
2540 case KVM_DEV_FLIC_APF_ENABLE:
2541 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2542 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2543 case KVM_DEV_FLIC_ADAPTER_MODIFY:
Halil Pasic6d28f782016-01-25 19:10:40 +01002544 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
Fei Li51978392017-02-17 17:06:26 +08002545 case KVM_DEV_FLIC_AISM:
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002546 case KVM_DEV_FLIC_AIRQ_INJECT:
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002547 case KVM_DEV_FLIC_AISM_ALL:
Halil Pasic4f129852016-02-25 12:44:17 +01002548 return 0;
2549 }
2550 return -ENXIO;
2551}
2552
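/* Only one FLIC instance may exist per VM; a second create is rejected. */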
Jens Freimannc05c4182013-10-07 16:13:45 +02002553static int flic_create(struct kvm_device *dev, u32 type)
2554{
2555 if (!dev)
2556 return -EINVAL;
2557 if (dev->kvm->arch.flic)
2558 return -EINVAL;
2559 dev->kvm->arch.flic = dev;
2560 return 0;
2561}
2562
2563static void flic_destroy(struct kvm_device *dev)
2564{
2565 dev->kvm->arch.flic = NULL;
2566 kfree(dev);
2567}
2568
2569/* s390 floating irq controller (flic) */
2570struct kvm_device_ops kvm_flic_ops = {
2571 .name = "kvm-flic",
2572 .get_attr = flic_get_attr,
2573 .set_attr = flic_set_attr,
Halil Pasic4f129852016-02-25 12:44:17 +01002574 .has_attr = flic_has_attr,
Jens Freimannc05c4182013-10-07 16:13:45 +02002575 .create = flic_create,
2576 .destroy = flic_destroy,
2577};
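
/*
 * These ops become reachable once userspace has created the device; on
 * success, cd.fd below accepts the KVM_SET/GET_DEVICE_ATTR ioctls handled
 * above.  Rough sketch (illustrative only; vm_fd is a placeholder for an
 * open VM file descriptor):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 */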
Cornelia Huck84223592013-07-15 13:36:01 +02002578
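/*
 * Translate an (indicator address, bit number) pair into a bit index for
 * the Linux bitops on the mapped page: each byte of offset within the page
 * contributes eight bits, and "swap" mirrors the position inside a 64-bit
 * word (bit ^ (BITS_PER_LONG - 1)) to convert between the bit numbering of
 * the guest indicator area and the numbering used by set_bit().
 */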
2579static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2580{
2581 unsigned long bit;
2582
2583 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2584
2585 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2586}
2587
2588static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2589 u64 addr)
2590{
2591 struct s390_map_info *map;
2592
2593 if (!adapter)
2594 return NULL;
2595
2596 list_for_each_entry(map, &adapter->maps, list) {
2597 if (map->guest_addr == addr)
2598 return map;
2599 }
2600 return NULL;
2601}
2602
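/*
 * Set the adapter-local indicator bit, then the summary indicator.
 * Returns 1 if the summary bit was newly set (an interrupt should be
 * injected), 0 if it was already set (the interrupt is coalesced), and
 * -1 if one of the indicator pages is not mapped for the adapter.
 */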
2603static int adapter_indicators_set(struct kvm *kvm,
2604 struct s390_io_adapter *adapter,
2605 struct kvm_s390_adapter_int *adapter_int)
2606{
2607 unsigned long bit;
2608 int summary_set, idx;
2609 struct s390_map_info *info;
2610 void *map;
2611
2612 info = get_map_info(adapter, adapter_int->ind_addr);
2613 if (!info)
2614 return -1;
2615 map = page_address(info->page);
2616 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2617 set_bit(bit, map);
2618 idx = srcu_read_lock(&kvm->srcu);
2619 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2620 set_page_dirty_lock(info->page);
2621 info = get_map_info(adapter, adapter_int->summary_addr);
2622 if (!info) {
2623 srcu_read_unlock(&kvm->srcu, idx);
2624 return -1;
2625 }
2626 map = page_address(info->page);
2627 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2628 adapter->swap);
2629 summary_set = test_and_set_bit(bit, map);
2630 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2631 set_page_dirty_lock(info->page);
2632 srcu_read_unlock(&kvm->srcu, idx);
2633 return summary_set ? 0 : 1;
2634}
2635
2636/*
2637 * < 0 - not injected due to error
2638 * = 0 - coalesced, summary indicator already active
2639 * > 0 - injected interrupt
2640 */
2641static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2642 struct kvm *kvm, int irq_source_id, int level,
2643 bool line_status)
2644{
2645 int ret;
2646 struct s390_io_adapter *adapter;
2647
2648 /* We're only interested in the 0->1 transition. */
2649 if (!level)
2650 return 0;
2651 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2652 if (!adapter)
2653 return -1;
2654 down_read(&adapter->maps_lock);
2655 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2656 up_read(&adapter->maps_lock);
2657 if ((ret > 0) && !adapter->masked) {
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002658 ret = kvm_s390_inject_airq(kvm, adapter);
Cornelia Huck84223592013-07-15 13:36:01 +02002659 if (ret == 0)
2660 ret = 1;
2661 }
2662 return ret;
2663}
2664
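/*
 * set_adapter_int() is installed as the ->set callback of an adapter
 * routing entry (see kvm_set_routing_entry() below), so it runs whenever
 * the associated GSI, typically bound to an irqfd, is raised.
 */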
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002665/*
2666 * Reinject the machine check into the guest.
2667 */
2668void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2669 struct mcck_volatile_info *mcck_info)
2670{
2671 struct kvm_s390_interrupt_info inti;
2672 struct kvm_s390_irq irq;
2673 struct kvm_s390_mchk_info *mchk;
2674 union mci mci;
2675 __u64 cr14 = 0; /* upper bits are not used */
David Hildenbrand3dbf0202017-08-30 18:06:01 +02002676 int rc;
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002677
2678 mci.val = mcck_info->mcic;
2679 if (mci.sr)
Martin Schwidefskycc654502017-10-12 13:24:46 +02002680 cr14 |= CR14_RECOVERY_SUBMASK;
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002681 if (mci.dg)
Martin Schwidefskycc654502017-10-12 13:24:46 +02002682 cr14 |= CR14_DEGRADATION_SUBMASK;
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002683 if (mci.w)
Martin Schwidefskycc654502017-10-12 13:24:46 +02002684 cr14 |= CR14_WARNING_SUBMASK;
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002685
2686 mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2687 mchk->cr14 = cr14;
2688 mchk->mcic = mcck_info->mcic;
2689 mchk->ext_damage_code = mcck_info->ext_damage_code;
2690 mchk->failing_storage_address = mcck_info->failing_storage_address;
2691 if (mci.ck) {
2692 /* Inject the floating machine check */
2693 inti.type = KVM_S390_MCHK;
David Hildenbrand3dbf0202017-08-30 18:06:01 +02002694 rc = __inject_vm(vcpu->kvm, &inti);
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002695 } else {
2696		/* Inject the machine check into the specified vcpu */
2697 irq.type = KVM_S390_MCHK;
David Hildenbrand3dbf0202017-08-30 18:06:01 +02002698 rc = kvm_s390_inject_vcpu(vcpu, &irq);
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002699 }
David Hildenbrand3dbf0202017-08-30 18:06:01 +02002700 WARN_ON_ONCE(rc);
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02002701}
2702
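/*
 * Translate a userspace routing entry into its kernel representation; only
 * KVM_IRQ_ROUTING_S390_ADAPTER entries are accepted.  Sketch of such an
 * entry as userspace would build it (illustrative only; the right-hand-side
 * values are placeholders and the surrounding struct kvm_irq_routing
 * buffer handling is omitted):
 *
 *	struct kvm_irq_routing_entry ue = {
 *		.gsi  = gsi,
 *		.type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *		.u.adapter = {
 *			.summary_addr   = summary_addr,
 *			.ind_addr       = ind_addr,
 *			.summary_offset = summary_offset,
 *			.ind_offset     = ind_offset,
 *			.adapter_id     = adapter_id,
 *		},
 *	};
 */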
Radim Krčmářc63cf532016-07-12 22:09:26 +02002703int kvm_set_routing_entry(struct kvm *kvm,
2704 struct kvm_kernel_irq_routing_entry *e,
Cornelia Huck84223592013-07-15 13:36:01 +02002705 const struct kvm_irq_routing_entry *ue)
2706{
2707 int ret;
2708
2709 switch (ue->type) {
2710 case KVM_IRQ_ROUTING_S390_ADAPTER:
2711 e->set = set_adapter_int;
2712 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2713 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2714 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2715 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2716 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2717 ret = 0;
2718 break;
2719 default:
2720 ret = -EINVAL;
2721 }
2722
2723 return ret;
2724}
2725
2726int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2727 int irq_source_id, int level, bool line_status)
2728{
2729 return -EINVAL;
2730}
Jens Freimann816c7662014-11-24 17:13:46 +01002731
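/*
 * Restore the local interrupt state of a vcpu from a userspace buffer of
 * struct kvm_s390_irq entries (the KVM_S390_SET_IRQ_STATE ioctl, used for
 * migration).  Refused with -EBUSY while any local interrupt is still
 * pending on the vcpu.
 */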
2732int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2733{
2734 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2735 struct kvm_s390_irq *buf;
2736 int r = 0;
2737 int n;
2738
2739 buf = vmalloc(len);
2740 if (!buf)
2741 return -ENOMEM;
2742
2743 if (copy_from_user((void *) buf, irqstate, len)) {
2744 r = -EFAULT;
2745 goto out_free;
2746 }
2747
2748 /*
2749 * Don't allow setting the interrupt state
2750 * when there are already interrupts pending
2751 */
2752 spin_lock(&li->lock);
2753 if (li->pending_irqs) {
2754 r = -EBUSY;
2755 goto out_unlock;
2756 }
2757
2758 for (n = 0; n < len / sizeof(*buf); n++) {
2759 r = do_inject_vcpu(vcpu, &buf[n]);
2760 if (r)
2761 break;
2762 }
2763
2764out_unlock:
2765 spin_unlock(&li->lock);
2766out_free:
2767 vfree(buf);
2768
2769 return r;
2770}
2771
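/*
 * Convert one pending IRQ_PEND_* bit of the local interrupt state into the
 * uapi struct kvm_s390_irq layout reported by KVM_S390_GET_IRQ_STATE.
 */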
2772static void store_local_irq(struct kvm_s390_local_interrupt *li,
2773 struct kvm_s390_irq *irq,
2774 unsigned long irq_type)
2775{
2776 switch (irq_type) {
2777 case IRQ_PEND_MCHK_EX:
2778 case IRQ_PEND_MCHK_REP:
2779 irq->type = KVM_S390_MCHK;
2780 irq->u.mchk = li->irq.mchk;
2781 break;
2782 case IRQ_PEND_PROG:
2783 irq->type = KVM_S390_PROGRAM_INT;
2784 irq->u.pgm = li->irq.pgm;
2785 break;
2786 case IRQ_PEND_PFAULT_INIT:
2787 irq->type = KVM_S390_INT_PFAULT_INIT;
2788 irq->u.ext = li->irq.ext;
2789 break;
2790 case IRQ_PEND_EXT_EXTERNAL:
2791 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2792 irq->u.extcall = li->irq.extcall;
2793 break;
2794 case IRQ_PEND_EXT_CLOCK_COMP:
2795 irq->type = KVM_S390_INT_CLOCK_COMP;
2796 break;
2797 case IRQ_PEND_EXT_CPU_TIMER:
2798 irq->type = KVM_S390_INT_CPU_TIMER;
2799 break;
2800 case IRQ_PEND_SIGP_STOP:
2801 irq->type = KVM_S390_SIGP_STOP;
2802 irq->u.stop = li->irq.stop;
2803 break;
2804 case IRQ_PEND_RESTART:
2805 irq->type = KVM_S390_RESTART;
2806 break;
2807 case IRQ_PEND_SET_PREFIX:
2808 irq->type = KVM_S390_SIGP_SET_PREFIX;
2809 irq->u.prefix = li->irq.prefix;
2810 break;
2811 }
2812}
2813
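/*
 * Dump the pending local interrupts of a vcpu into a userspace buffer
 * (KVM_S390_GET_IRQ_STATE).  SIGP emergencies are expanded into one entry
 * per pending source cpu, and an external call that is only recorded in
 * the SCA (sigp interpretation facility) is read back via
 * sca_ext_call_pending().  Returns the number of bytes written or a
 * negative error code.
 */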
2814int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2815{
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02002816 int scn;
Jens Freimann816c7662014-11-24 17:13:46 +01002817 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2818 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2819 unsigned long pending_irqs;
2820 struct kvm_s390_irq irq;
2821 unsigned long irq_type;
2822 int cpuaddr;
2823 int n = 0;
2824
2825 spin_lock(&li->lock);
2826 pending_irqs = li->pending_irqs;
2827 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2828 sizeof(sigp_emerg_pending));
2829 spin_unlock(&li->lock);
2830
2831 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2832 memset(&irq, 0, sizeof(irq));
2833 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2834 continue;
2835 if (n + sizeof(irq) > len)
2836 return -ENOBUFS;
2837 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2838 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2839 return -EFAULT;
2840 n += sizeof(irq);
2841 }
2842
2843 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2844 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2845 memset(&irq, 0, sizeof(irq));
2846 if (n + sizeof(irq) > len)
2847 return -ENOBUFS;
2848 irq.type = KVM_S390_INT_EMERGENCY;
2849 irq.u.emerg.code = cpuaddr;
2850 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2851 return -EFAULT;
2852 n += sizeof(irq);
2853 }
2854 }
2855
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02002856 if (sca_ext_call_pending(vcpu, &scn)) {
Jens Freimann816c7662014-11-24 17:13:46 +01002857 if (n + sizeof(irq) > len)
2858 return -ENOBUFS;
2859 memset(&irq, 0, sizeof(irq));
2860 irq.type = KVM_S390_INT_EXTERNAL_CALL;
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02002861 irq.u.extcall.code = scn;
Jens Freimann816c7662014-11-24 17:13:46 +01002862 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2863 return -EFAULT;
2864 n += sizeof(irq);
2865 }
2866
2867 return n;
2868}
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002869
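/*
 * GISA (guest interruption state area) handling: when the AIV facility is
 * available, adapter interrupts can be delivered to the guest through this
 * per-VM area, which lives in sie_page2.  Clearing zeroes the area and
 * re-establishes the self-referencing next_alert pointer.
 */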
2870void kvm_s390_gisa_clear(struct kvm *kvm)
2871{
2872 if (kvm->arch.gisa) {
2873 memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
2874 kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
2875 VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
2876 }
2877}
2878
2879void kvm_s390_gisa_init(struct kvm *kvm)
2880{
Michael Mueller4b9f9522017-06-23 13:51:25 +02002881 if (css_general_characteristics.aiv) {
Michael Muellerf180bfd2017-06-23 14:46:21 +02002882 kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
2883 VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
2884 kvm_s390_gisa_clear(kvm);
2885 }
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002886}
2887
2888void kvm_s390_gisa_destroy(struct kvm *kvm)
2889{
2890 if (!kvm->arch.gisa)
2891 return;
2892 kvm->arch.gisa = NULL;
2893}