// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

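/*
 * Values written to the external interrupt cpu-address field
 * (__LC_EXT_CPU_ADDR) to distinguish pfault init/done and virtio
 * notifications, which are all delivered as EXT_IRQ_CP_SERVICE external
 * interrupts (see __deliver_pfault_init(), __deliver_pfault_done() and
 * __deliver_virtio() below).
 */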
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

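/*
 * Mark an external call from src_id as pending in this VCPU's (E)SCA entry.
 * The cmpxchg() only succeeds if no other external call is currently marked
 * pending (i.e. the old "c" bit is clear); otherwise -EBUSY is returned.
 */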
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect);		/* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
		(irq_type <= IRQ_PEND_IO_ISC_0));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

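/*
 * Summarize everything that is currently pending for this VCPU: floating
 * interrupts of the VM, local interrupts of the VCPU, and the GISA
 * interruption pending mask, shifted so that each ISC ends up on its
 * IRQ_PEND_IO_ISC_* bit.
 */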
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
		vcpu->arch.local_int.pending_irqs |
		kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

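/*
 * Mapping between an I/O interruption subclass (ISC) and its IRQ_PEND_*
 * bit number. The IRQ_PEND_IO_ISC_* values are assumed to be consecutive,
 * with ISC 0 at the highest position (is_ioirq() relies on this as well),
 * so the mapping is its own inverse:
 *   isc_to_irq_type(0) == IRQ_PEND_IO_ISC_0,
 *   isc_to_irq_type(7) == IRQ_PEND_IO_ISC_7,
 *   irq_type_to_isc(isc_to_irq_type(i)) == i.
 */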
static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

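/*
 * Reduce the set of pending interrupts to those that are currently
 * deliverable: the external, I/O and machine check classes are filtered by
 * the guest PSW mask and by the subclass mask bits in CR0, CR6 and CR14.
 * SIGP STOP is never delivered here; it is handled via intercept requests.
 */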
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				      CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

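/*
 * The set_intercept_indicators_*() helpers arm interception for interrupt
 * classes that are pending but currently not deliverable, either by
 * requesting an interception (CPUSTAT_* flags) or by intercepting the
 * relevant control register loads / LPSW (lctl/ictl bits), so that KVM
 * regains control once the guest changes its masking state.
 */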
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

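/*
 * Deliver a program interrupt: store the exception specific fields that
 * belong to the interruption code (translation exception code, access IDs,
 * monitor or data exception code), rewind the PSW for nullifying conditions
 * unless KVM_S390_PGM_FLAGS_NO_REWIND is set, then store ILC, code and PER
 * information and swap the program old/new PSWs.
 */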
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

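/*
 * Copy one I/O interruption (subchannel id/nr, interruption parameter and
 * interruption word) into the guest lowcore and swap the I/O old/new PSWs.
 * Common helper for list-based floating interrupts and GISA adapter
 * interrupts.
 */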
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

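/*
 * Deliver an I/O interrupt for the ISC encoded in irq_type: prefer the
 * oldest queued floating interrupt for that ISC; if the list is empty, fall
 * back to the GISA interruption pending mask and synthesize the interruption
 * word for an adapter interrupt on that ISC.
 */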
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (vcpu->kvm->arch.gisa &&
	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
		/*
		 * in case an adapter interrupt was not delivered
		 * in SIE context KVM will handle the delivery
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = (isc << 27) | 0x80000000;
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
			io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
			io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}

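/*
 * Delivery callbacks indexed by IRQ_PEND_* bit number. I/O interrupts are
 * not part of this table; they carry the ISC in the irq type and are
 * expected to be delivered through __deliver_io() instead (see is_ioirq()).
 */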
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

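/*
 * Compute how long (in ns) the VCPU may sleep before the clock comparator
 * or the CPU timer needs a wakeup. A return value of 0 means one of the
 * enabled timer sources has already expired, so the VCPU must not block.
 */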
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

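/*
 * Handle the enabled wait state: block the VCPU until an interrupt becomes
 * deliverable, arming the ckc hrtimer when clock comparator or CPU timer
 * interrupts are enabled. A disabled wait (all interrupt classes masked) is
 * not handled here and is reported back with -EOPNOTSUPP.
 */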
Carsten Otteba5c1e92008-03-25 18:47:26 +01001088int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
1089{
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001090 u64 sltime;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001091
1092 vcpu->stat.exit_wait_state++;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001093
David Hildenbrand0759d062014-05-13 16:54:32 +02001094 /* fast path */
David Hildenbrand118b8622015-09-23 12:25:15 +02001095 if (kvm_arch_vcpu_runnable(vcpu))
David Hildenbrand0759d062014-05-13 16:54:32 +02001096 return 0;
Carsten Ottee52b2af2008-05-21 13:37:44 +02001097
Carsten Otteba5c1e92008-03-25 18:47:26 +01001098 if (psw_interrupts_disabled(vcpu)) {
1099 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001100 return -EOPNOTSUPP; /* disabled wait */
Carsten Otteba5c1e92008-03-25 18:47:26 +01001101 }
1102
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001103 if (!ckc_interrupts_enabled(vcpu) &&
1104 !cpu_timer_interrupts_enabled(vcpu)) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001105 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
David Hildenbrandbda343e2014-12-12 12:26:40 +01001106 __set_cpu_idle(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001107 goto no_timer;
1108 }
1109
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001110 sltime = __calculate_sltime(vcpu);
1111 if (!sltime)
David Hildenbrandbda343e2014-12-12 12:26:40 +01001112 return 0;
1113
1114 __set_cpu_idle(vcpu);
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01001115 hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001116 VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001117no_timer:
Thomas Huth800c1062013-09-12 10:33:45 +02001118 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
David Hildenbrand0759d062014-05-13 16:54:32 +02001119 kvm_vcpu_block(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001120 __unset_cpu_idle(vcpu);
Thomas Huth800c1062013-09-12 10:33:45 +02001121 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1122
David Hildenbrand2d00f752014-12-11 10:18:01 +01001123 hrtimer_cancel(&vcpu->arch.ckc_timer);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001124 return 0;
1125}
1126
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001127void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
1128{
Christian Borntraeger3491caf2016-05-13 12:16:35 +02001129 /*
 1130	 * We cannot move this into the if, as the CPU might already be
 1131	 * in kvm_vcpu_block without having the waitqueue set (polling).
1132 */
1133 vcpu->valid_wakeup = true;
Christian Borntraeger72e1ad42017-09-19 12:34:06 +02001134 /*
 1135	 * This is mostly to document that the read in swait_active() could
 1136	 * be moved before other stores, leading to subtle races.
 1137	 * All current users do not store or use an atomic-like update.
1138 */
1139 smp_mb__after_atomic();
Marcelo Tosatti85773702016-02-19 09:46:39 +01001140 if (swait_active(&vcpu->wq)) {
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001141 /*
 1142	 * The vcpu gave up the cpu voluntarily; mark it as a good
 1143	 * yield-candidate.
1144 */
1145 vcpu->preempted = true;
Marcelo Tosatti85773702016-02-19 09:46:39 +01001146 swake_up(&vcpu->wq);
David Hildenbrandce2e4f02014-07-11 10:00:43 +02001147 vcpu->stat.halt_wakeup++;
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001148 }
David Hildenbrandadbf1692016-05-27 22:03:52 +02001149 /*
1150 * The VCPU might not be sleeping but is executing the VSIE. Let's
1151 * kick it, so it leaves the SIE to process the request.
1152 */
1153 kvm_s390_vsie_kick(vcpu);
David Hildenbrand0e9c85a2014-05-16 11:59:46 +02001154}
1155
Christian Borntraegerca872302009-05-12 17:21:49 +02001156enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1157{
1158 struct kvm_vcpu *vcpu;
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001159 u64 sltime;
Christian Borntraegerca872302009-05-12 17:21:49 +02001160
1161 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001162 sltime = __calculate_sltime(vcpu);
Christian Borntraegerca872302009-05-12 17:21:49 +02001163
David Hildenbrand2d00f752014-12-11 10:18:01 +01001164 /*
1165 * If the monotonic clock runs faster than the tod clock we might be
1166 * woken up too early and have to go back to sleep to avoid deadlocks.
1167 */
David Hildenbrandb3c17f12016-02-22 14:14:50 +01001168 if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
David Hildenbrand2d00f752014-12-11 10:18:01 +01001169 return HRTIMER_RESTART;
1170 kvm_s390_vcpu_wakeup(vcpu);
Christian Borntraegerca872302009-05-12 17:21:49 +02001171 return HRTIMER_NORESTART;
1172}
Carsten Otteba5c1e92008-03-25 18:47:26 +01001173
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001174void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1175{
1176 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001177
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001178 spin_lock(&li->lock);
Jens Freimann383d0b02014-07-29 15:11:49 +02001179 li->pending_irqs = 0;
1180 bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1181 memset(&li->irq, 0, sizeof(li->irq));
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001182 spin_unlock(&li->lock);
David Hildenbrand49539192014-02-21 08:59:59 +01001183
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02001184 sca_clear_ext_call(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001185}
1186
Christian Borntraeger614aeab2014-08-25 12:27:29 +02001187int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001188{
Christian Borntraeger180c12f2008-06-27 15:05:40 +02001189 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001190 deliver_irq_t func;
Jens Freimann79395032014-04-17 10:10:30 +02001191 int rc = 0;
Jens Freimann383d0b02014-07-29 15:11:49 +02001192 unsigned long irq_type;
Jens Freimann6d3da242013-07-03 15:18:35 +02001193 unsigned long irqs;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001194
1195 __reset_intercept_indicators(vcpu);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001196
Jens Freimann383d0b02014-07-29 15:11:49 +02001197 /* pending ckc conditions might have been invalidated */
1198 clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
David Hildenbrandb4aec922014-12-01 15:55:42 +01001199 if (ckc_irq_pending(vcpu))
Jens Freimann383d0b02014-07-29 15:11:49 +02001200 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1201
David Hildenbrandb4aec922014-12-01 15:55:42 +01001202 /* pending cpu timer conditions might have been invalidated */
1203 clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1204 if (cpu_timer_irq_pending(vcpu))
1205 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1206
Jens Freimannffeca0a2015-04-17 10:21:04 +02001207 while ((irqs = deliverable_irqs(vcpu)) && !rc) {
Michael Muellerc7901a62017-06-29 18:39:27 +02001208 /* bits are in the reverse order of interrupt priority */
1209 irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
Jens Freimann6d3da242013-07-03 15:18:35 +02001210 if (is_ioirq(irq_type)) {
1211 rc = __deliver_io(vcpu, irq_type);
1212 } else {
1213 func = deliver_irq_funcs[irq_type];
1214 if (!func) {
1215 WARN_ON_ONCE(func == NULL);
1216 clear_bit(irq_type, &li->pending_irqs);
1217 continue;
1218 }
1219 rc = func(vcpu);
Jens Freimann383d0b02014-07-29 15:11:49 +02001220 }
Jens Freimannffeca0a2015-04-17 10:21:04 +02001221 }
Jens Freimann383d0b02014-07-29 15:11:49 +02001222
Jens Freimann6d3da242013-07-03 15:18:35 +02001223 set_intercept_indicators(vcpu);
Jens Freimann79395032014-04-17 10:10:30 +02001224
1225 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001226}
1227
Jens Freimann383d0b02014-07-29 15:11:49 +02001228static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001229{
1230 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1231
David Hildenbranded2afcf2015-07-20 10:33:03 +02001232 VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1233 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1234 irq->u.pgm.code, 0);
1235
David Hildenbrand634790b2015-11-04 16:33:33 +01001236 if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1237 /* auto detection if no valid ILC was given */
1238 irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1239 irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1240 irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1241 }
1242
David Hildenbrand238293b2015-05-04 12:38:48 +02001243 if (irq->u.pgm.code == PGM_PER) {
1244 li->irq.pgm.code |= PGM_PER;
David Hildenbrand634790b2015-11-04 16:33:33 +01001245 li->irq.pgm.flags = irq->u.pgm.flags;
David Hildenbrand238293b2015-05-04 12:38:48 +02001246 /* only modify PER related information */
1247 li->irq.pgm.per_address = irq->u.pgm.per_address;
1248 li->irq.pgm.per_code = irq->u.pgm.per_code;
1249 li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1250 li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1251 } else if (!(irq->u.pgm.code & PGM_PER)) {
1252 li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1253 irq->u.pgm.code;
David Hildenbrand634790b2015-11-04 16:33:33 +01001254 li->irq.pgm.flags = irq->u.pgm.flags;
David Hildenbrand238293b2015-05-04 12:38:48 +02001255 /* only modify non-PER information */
1256 li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1257 li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1258 li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1259 li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1260 li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1261 li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1262 } else {
1263 li->irq.pgm = irq->u.pgm;
1264 }
Jens Freimann91851242014-12-01 16:43:40 +01001265 set_bit(IRQ_PEND_PROG, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001266 return 0;
1267}
1268
Jens Freimann383d0b02014-07-29 15:11:49 +02001269static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001270{
1271 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1272
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001273 VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1274 irq->u.ext.ext_params2);
Jens Freimann383d0b02014-07-29 15:11:49 +02001275 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1276 irq->u.ext.ext_params,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001277 irq->u.ext.ext_params2);
Jens Freimann383d0b02014-07-29 15:11:49 +02001278
1279 li->irq.ext = irq->u.ext;
1280 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001281 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001282 return 0;
1283}
1284
Christian Borntraeger0675d922015-01-15 12:40:42 +01001285static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001286{
1287 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001288 struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001289 uint16_t src_id = irq->u.extcall.code;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001290
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001291 VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
David Hildenbrandea5f4962014-10-14 15:29:30 +02001292 src_id);
Jens Freimann383d0b02014-07-29 15:11:49 +02001293 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001294 src_id, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001295
David Hildenbrandea5f4962014-10-14 15:29:30 +02001296 /* sending vcpu invalid */
David Hildenbrand152e9f62015-11-05 09:06:06 +01001297 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001298 return -EINVAL;
1299
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001300 if (sclp.has_sigpif)
Eugene (jno) Dvurechenskia5bd7642015-04-21 15:10:10 +02001301 return sca_inject_ext_call(vcpu, src_id);
David Hildenbrandea5f4962014-10-14 15:29:30 +02001302
David Hildenbrandb938eace2015-04-30 13:33:59 +02001303 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
David Hildenbrandea5f4962014-10-14 15:29:30 +02001304 return -EBUSY;
Jens Freimann383d0b02014-07-29 15:11:49 +02001305 *extcall = irq->u.extcall;
David Hildenbrand20182242018-01-23 18:05:28 +01001306 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001307 return 0;
1308}
1309
Jens Freimann383d0b02014-07-29 15:11:49 +02001310static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001311{
1312 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001313 struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001314
David Hildenbranded2afcf2015-07-20 10:33:03 +02001315 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
Jens Freimann556cc0d2014-12-18 15:52:21 +01001316 irq->u.prefix.address);
Jens Freimann383d0b02014-07-29 15:11:49 +02001317 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001318 irq->u.prefix.address, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001319
David Hildenbranda3a9c592014-10-14 09:44:55 +02001320 if (!is_vcpu_stopped(vcpu))
1321 return -EBUSY;
1322
Jens Freimann383d0b02014-07-29 15:11:49 +02001323 *prefix = irq->u.prefix;
1324 set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001325 return 0;
1326}
1327
David Hildenbrand6cddd432014-10-15 16:48:53 +02001328#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
Jens Freimann383d0b02014-07-29 15:11:49 +02001329static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001330{
1331 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
David Hildenbrand28225452014-10-15 16:48:16 +02001332 struct kvm_s390_stop_info *stop = &li->irq.stop;
David Hildenbrand6cddd432014-10-15 16:48:53 +02001333 int rc = 0;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001334
David Hildenbranded2afcf2015-07-20 10:33:03 +02001335 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001336
David Hildenbrand28225452014-10-15 16:48:16 +02001337 if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1338 return -EINVAL;
1339
David Hildenbrand6cddd432014-10-15 16:48:53 +02001340 if (is_vcpu_stopped(vcpu)) {
1341 if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1342 rc = kvm_s390_store_status_unloaded(vcpu,
1343 KVM_S390_STORE_STATUS_NOADDR);
1344 return rc;
1345 }
1346
1347 if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1348 return -EBUSY;
David Hildenbrand28225452014-10-15 16:48:16 +02001349 stop->flags = irq->u.stop.flags;
David Hildenbrand20182242018-01-23 18:05:28 +01001350 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001351 return 0;
1352}
1353
1354static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
Jens Freimann383d0b02014-07-29 15:11:49 +02001355 struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001356{
1357 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1358
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001359 VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
David Hildenbranded2afcf2015-07-20 10:33:03 +02001360 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001361
1362 set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001363 return 0;
1364}
1365
1366static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
Jens Freimann383d0b02014-07-29 15:11:49 +02001367 struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001368{
1369 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1370
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001371 VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
Jens Freimann383d0b02014-07-29 15:11:49 +02001372 irq->u.emerg.code);
1373 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001374 irq->u.emerg.code, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001375
David Hildenbrandb85de332015-11-05 09:38:15 +01001376 /* sending vcpu invalid */
1377 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1378 return -EINVAL;
1379
Jens Freimann49538d12014-12-18 15:48:14 +01001380 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
Jens Freimann383d0b02014-07-29 15:11:49 +02001381 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001382 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001383 return 0;
1384}
1385
Jens Freimann383d0b02014-07-29 15:11:49 +02001386static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001387{
1388 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001389 struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
Jens Freimann0146a7b2014-07-28 15:37:58 +02001390
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001391 VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
Jens Freimann556cc0d2014-12-18 15:52:21 +01001392 irq->u.mchk.mcic);
Jens Freimann383d0b02014-07-29 15:11:49 +02001393 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001394 irq->u.mchk.mcic);
Jens Freimann383d0b02014-07-29 15:11:49 +02001395
1396 /*
Jens Freimannfc2020c2014-08-13 10:09:04 +02001397	 * Because repressible machine checks can be indicated along with
 1398	 * exigent machine checks (PoP, Chapter 11, Interruption action),
 1399	 * we need to combine cr14, mcic and the external damage code.
 1400	 * The failing storage address and the logout area should not be OR'ed
 1401	 * together; we just indicate the last occurrence of the corresponding
 1402	 * machine check.
Jens Freimann383d0b02014-07-29 15:11:49 +02001403 */
Jens Freimannfc2020c2014-08-13 10:09:04 +02001404 mchk->cr14 |= irq->u.mchk.cr14;
Jens Freimann383d0b02014-07-29 15:11:49 +02001405 mchk->mcic |= irq->u.mchk.mcic;
Jens Freimannfc2020c2014-08-13 10:09:04 +02001406 mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1407 mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1408 memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1409 sizeof(mchk->fixed_logout));
Jens Freimann383d0b02014-07-29 15:11:49 +02001410 if (mchk->mcic & MCHK_EX_MASK)
1411 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1412 else if (mchk->mcic & MCHK_REP_MASK)
1413 set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001414 return 0;
1415}
1416
Jens Freimann383d0b02014-07-29 15:11:49 +02001417static int __inject_ckc(struct kvm_vcpu *vcpu)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001418{
1419 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1420
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001421 VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
Jens Freimann383d0b02014-07-29 15:11:49 +02001422 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001423 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001424
1425 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001426 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001427 return 0;
1428}
1429
Jens Freimann383d0b02014-07-29 15:11:49 +02001430static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
Jens Freimann0146a7b2014-07-28 15:37:58 +02001431{
1432 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1433
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001434 VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
Jens Freimann383d0b02014-07-29 15:11:49 +02001435 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
David Hildenbranded2afcf2015-07-20 10:33:03 +02001436 0, 0);
Jens Freimann383d0b02014-07-29 15:11:49 +02001437
1438 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
David Hildenbrand20182242018-01-23 18:05:28 +01001439 kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
Jens Freimannbcd84682014-02-11 11:07:05 +01001440 return 0;
1441}
1442
Jens Freimann6d3da242013-07-03 15:18:35 +02001443static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1444 int isc, u32 schid)
1445{
1446 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1447 struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1448 struct kvm_s390_interrupt_info *iter;
1449 u16 id = (schid & 0xffff0000U) >> 16;
1450 u16 nr = schid & 0x0000ffffU;
Jens Freimann383d0b02014-07-29 15:11:49 +02001451
Jens Freimann6d3da242013-07-03 15:18:35 +02001452 spin_lock(&fi->lock);
1453 list_for_each_entry(iter, isc_list, list) {
1454 if (schid && (id != iter->io.subchannel_id ||
1455 nr != iter->io.subchannel_nr))
1456 continue;
1457 /* found an appropriate entry */
1458 list_del_init(&iter->list);
1459 fi->counters[FIRQ_CNTR_IO] -= 1;
1460 if (list_empty(isc_list))
Michael Muelleree739f42017-07-03 15:32:50 +02001461 clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
Jens Freimann6d3da242013-07-03 15:18:35 +02001462 spin_unlock(&fi->lock);
1463 return iter;
1464 }
1465 spin_unlock(&fi->lock);
1466 return NULL;
1467}
1468
1469/*
1470 * Dequeue and return an I/O interrupt matching any of the interruption
1471 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1472 */
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001473struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
Jens Freimann6d3da242013-07-03 15:18:35 +02001474 u64 isc_mask, u32 schid)
1475{
1476 struct kvm_s390_interrupt_info *inti = NULL;
1477 int isc;
1478
1479 for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1480 if (isc_mask & isc_to_isc_bits(isc))
1481 inti = get_io_int(kvm, isc, schid);
1482 }
1483 return inti;
1484}
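/*
 * Usage sketch (illustrative; mirrors clear_io_irq() further down): to pull
 * a pending I/O interrupt for one specific subchannel regardless of its
 * interruption subclass, pass a mask with all ISC bits set and the schid
 * encoded as (subchannel_id << 16) | subchannel_nr.  The "kvm", "id" and
 * "nr" variables below are assumed caller context:
 *
 *	u64 isc_mask = 0xffUL << 24;	// all iscs set
 *	u32 schid = ((u32)id << 16) | nr;
 *	struct kvm_s390_interrupt_info *inti =
 *		kvm_s390_get_io_int(kvm, isc_mask, schid);
 *	kfree(inti);			// the caller owns the dequeued entry
 */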
1485
1486#define SCCB_MASK 0xFFFFFFF8
1487#define SCCB_EVENT_PENDING 0x3
1488
1489static int __inject_service(struct kvm *kvm,
1490 struct kvm_s390_interrupt_info *inti)
1491{
1492 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1493
1494 spin_lock(&fi->lock);
1495 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1496 /*
 1497	 * Early versions of the QEMU s390 bios will inject several
 1498	 * service interrupts, one after another, without handling a
 1499	 * condition code indicating busy.
 1500	 * We will silently ignore those superfluous sccb values.
 1501	 * A future version of QEMU will take care of serializing
 1502	 * servc requests.
1503 */
1504 if (fi->srv_signal.ext_params & SCCB_MASK)
1505 goto out;
1506 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1507 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1508out:
1509 spin_unlock(&fi->lock);
1510 kfree(inti);
1511 return 0;
1512}
1513
1514static int __inject_virtio(struct kvm *kvm,
1515 struct kvm_s390_interrupt_info *inti)
1516{
1517 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1518
1519 spin_lock(&fi->lock);
1520 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1521 spin_unlock(&fi->lock);
1522 return -EBUSY;
1523 }
1524 fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1525 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1526 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1527 spin_unlock(&fi->lock);
1528 return 0;
1529}
1530
1531static int __inject_pfault_done(struct kvm *kvm,
1532 struct kvm_s390_interrupt_info *inti)
1533{
1534 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1535
1536 spin_lock(&fi->lock);
1537 if (fi->counters[FIRQ_CNTR_PFAULT] >=
1538 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1539 spin_unlock(&fi->lock);
1540 return -EBUSY;
1541 }
1542 fi->counters[FIRQ_CNTR_PFAULT] += 1;
1543 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1544 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1545 spin_unlock(&fi->lock);
1546 return 0;
1547}
1548
1549#define CR_PENDING_SUBCLASS 28
1550static int __inject_float_mchk(struct kvm *kvm,
1551 struct kvm_s390_interrupt_info *inti)
1552{
1553 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1554
1555 spin_lock(&fi->lock);
1556 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1557 fi->mchk.mcic |= inti->mchk.mcic;
1558 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1559 spin_unlock(&fi->lock);
1560 kfree(inti);
1561 return 0;
1562}
1563
1564static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001565{
1566 struct kvm_s390_float_interrupt *fi;
Jens Freimann6d3da242013-07-03 15:18:35 +02001567 struct list_head *list;
1568 int isc;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001569
Michael Muellerd7c5cb02017-06-12 14:15:19 +02001570 isc = int_word_to_isc(inti->io.io_int_word);
1571
1572 if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
1573 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1574 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1575 kfree(inti);
1576 return 0;
1577 }
1578
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001579 fi = &kvm->arch.float_int;
1580 spin_lock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001581 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1582 spin_unlock(&fi->lock);
1583 return -EBUSY;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001584 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001585 fi->counters[FIRQ_CNTR_IO] += 1;
1586
Christian Borntraegerdcc98ea2016-06-07 09:37:17 +02001587 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1588 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1589 else
1590 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1591 inti->io.subchannel_id >> 8,
1592 inti->io.subchannel_id >> 1 & 0x3,
1593 inti->io.subchannel_nr);
Jens Freimann6d3da242013-07-03 15:18:35 +02001594 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1595 list_add_tail(&inti->list, list);
Michael Muelleree739f42017-07-03 15:32:50 +02001596 set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001597 spin_unlock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001598 return 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001599}
1600
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001601/*
1602 * Find a destination VCPU for a floating irq and kick it.
1603 */
1604static void __floating_irq_kick(struct kvm *kvm, u64 type)
1605{
1606 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001607 struct kvm_vcpu *dst_vcpu;
1608 int sigcpu, online_vcpus, nr_tries = 0;
1609
1610 online_vcpus = atomic_read(&kvm->online_vcpus);
1611 if (!online_vcpus)
1612 return;
1613
1614 /* find idle VCPUs first, then round robin */
1615 sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1616 if (sigcpu == online_vcpus) {
1617 do {
1618 sigcpu = fi->next_rr_cpu;
1619 fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1620 /* avoid endless loops if all vcpus are stopped */
1621 if (nr_tries++ >= online_vcpus)
1622 return;
1623 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1624 }
1625 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1626
1627 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001628 switch (type) {
1629 case KVM_S390_MCHK:
David Hildenbrand20182242018-01-23 18:05:28 +01001630 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001631 break;
1632 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
David Hildenbrand20182242018-01-23 18:05:28 +01001633 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001634 break;
1635 default:
David Hildenbrand20182242018-01-23 18:05:28 +01001636 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001637 break;
1638 }
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001639 kvm_s390_vcpu_wakeup(dst_vcpu);
1640}
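/*
 * Worked example (illustration only): with four online VCPUs, none of them
 * idle or stopped, and fi->next_rr_cpu == 3, four consecutive floating
 * interrupts are kicked to VCPUs 3, 0, 1 and 2 in that order.  Whenever an
 * idle VCPU exists, it is preferred over the round-robin choice.
 */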
1641
Jens Freimanna91b8eb2014-01-30 08:40:23 +01001642static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001643{
Jens Freimann6d3da242013-07-03 15:18:35 +02001644 u64 type = READ_ONCE(inti->type);
1645 int rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001646
Jens Freimann6d3da242013-07-03 15:18:35 +02001647 switch (type) {
1648 case KVM_S390_MCHK:
1649 rc = __inject_float_mchk(kvm, inti);
1650 break;
1651 case KVM_S390_INT_VIRTIO:
1652 rc = __inject_virtio(kvm, inti);
1653 break;
1654 case KVM_S390_INT_SERVICE:
1655 rc = __inject_service(kvm, inti);
1656 break;
1657 case KVM_S390_INT_PFAULT_DONE:
1658 rc = __inject_pfault_done(kvm, inti);
1659 break;
1660 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1661 rc = __inject_io(kvm, inti);
1662 break;
1663 default:
1664 rc = -EINVAL;
Cornelia Huckd8346b72012-12-20 15:32:08 +01001665 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001666 if (rc)
1667 return rc;
1668
David Hildenbrand96e0ed22015-01-14 14:08:38 +01001669 __floating_irq_kick(kvm, type);
Jens Freimann6d3da242013-07-03 15:18:35 +02001670 return 0;
Jens Freimannc05c4182013-10-07 16:13:45 +02001671}
1672
1673int kvm_s390_inject_vm(struct kvm *kvm,
1674 struct kvm_s390_interrupt *s390int)
1675{
1676 struct kvm_s390_interrupt_info *inti;
David Hildenbrand428d53b2015-01-16 12:58:09 +01001677 int rc;
Jens Freimannc05c4182013-10-07 16:13:45 +02001678
1679 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1680 if (!inti)
1681 return -ENOMEM;
1682
1683 inti->type = s390int->type;
1684 switch (inti->type) {
1685 case KVM_S390_INT_VIRTIO:
1686 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1687 s390int->parm, s390int->parm64);
1688 inti->ext.ext_params = s390int->parm;
1689 inti->ext.ext_params2 = s390int->parm64;
1690 break;
1691 case KVM_S390_INT_SERVICE:
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001692 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
Jens Freimannc05c4182013-10-07 16:13:45 +02001693 inti->ext.ext_params = s390int->parm;
1694 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001695 case KVM_S390_INT_PFAULT_DONE:
Dominik Dingel3c038e62013-10-07 17:11:48 +02001696 inti->ext.ext_params2 = s390int->parm64;
1697 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02001698 case KVM_S390_MCHK:
Christian Borntraeger3f24ba12015-07-09 14:08:18 +02001699 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
Jens Freimannc05c4182013-10-07 16:13:45 +02001700 s390int->parm64);
1701 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1702 inti->mchk.mcic = s390int->parm64;
1703 break;
1704 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Jens Freimannc05c4182013-10-07 16:13:45 +02001705 inti->io.subchannel_id = s390int->parm >> 16;
1706 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1707 inti->io.io_int_parm = s390int->parm64 >> 32;
1708 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1709 break;
1710 default:
1711 kfree(inti);
1712 return -EINVAL;
1713 }
1714 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1715 2);
1716
David Hildenbrand428d53b2015-01-16 12:58:09 +01001717 rc = __inject_vm(kvm, inti);
1718 if (rc)
1719 kfree(inti);
1720 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001721}
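/*
 * Userspace-side sketch (hypothetical snippet, not part of this file):
 * floating interrupts typically reach kvm_s390_inject_vm() through the
 * KVM_S390_INTERRUPT vm ioctl.  Queueing a service-signal interrupt could
 * look roughly like this, assuming an open VM file descriptor "vm_fd", a
 * caller-provided "sccb_addr" and the usual <linux/kvm.h> and
 * <sys/ioctl.h> includes:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = sccb_addr,	// external interrupt parameter
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */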
1722
David Hildenbrand15462e32015-02-04 15:59:11 +01001723int kvm_s390_reinject_io_int(struct kvm *kvm,
Cornelia Huck2f32d4e2014-01-08 18:07:54 +01001724 struct kvm_s390_interrupt_info *inti)
1725{
David Hildenbrand15462e32015-02-04 15:59:11 +01001726 return __inject_vm(kvm, inti);
Cornelia Huck2f32d4e2014-01-08 18:07:54 +01001727}
1728
Jens Freimann383d0b02014-07-29 15:11:49 +02001729int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1730 struct kvm_s390_irq *irq)
Carsten Otteba5c1e92008-03-25 18:47:26 +01001731{
Jens Freimann383d0b02014-07-29 15:11:49 +02001732 irq->type = s390int->type;
1733 switch (irq->type) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001734 case KVM_S390_PROGRAM_INT:
Jens Freimann0146a7b2014-07-28 15:37:58 +02001735 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001736 return -EINVAL;
1737 irq->u.pgm.code = s390int->parm;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001738 break;
Christian Borntraegerb7e6e4d2009-01-22 10:29:08 +01001739 case KVM_S390_SIGP_SET_PREFIX:
Jens Freimann383d0b02014-07-29 15:11:49 +02001740 irq->u.prefix.address = s390int->parm;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001741 break;
David Hildenbrand28225452014-10-15 16:48:16 +02001742 case KVM_S390_SIGP_STOP:
1743 irq->u.stop.flags = s390int->parm;
1744 break;
Jason J. Herne82a12732012-10-02 16:25:36 +02001745 case KVM_S390_INT_EXTERNAL_CALL:
Jens Freimann94d1f562015-01-15 14:40:34 +01001746 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001747 return -EINVAL;
1748 irq->u.extcall.code = s390int->parm;
Jason J. Herne82a12732012-10-02 16:25:36 +02001749 break;
1750 case KVM_S390_INT_EMERGENCY:
Jens Freimann94d1f562015-01-15 14:40:34 +01001751 if (s390int->parm & 0xffff0000)
Jens Freimann383d0b02014-07-29 15:11:49 +02001752 return -EINVAL;
1753 irq->u.emerg.code = s390int->parm;
Jason J. Herne82a12732012-10-02 16:25:36 +02001754 break;
Cornelia Huck48a3e952012-12-20 15:32:09 +01001755 case KVM_S390_MCHK:
Jens Freimann383d0b02014-07-29 15:11:49 +02001756 irq->u.mchk.mcic = s390int->parm64;
1757 break;
1758 }
1759 return 0;
1760}
1761
David Hildenbrand6cddd432014-10-15 16:48:53 +02001762int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1763{
1764 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1765
1766 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1767}
1768
1769void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1770{
1771 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1772
1773 spin_lock(&li->lock);
1774 li->irq.stop.flags = 0;
1775 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1776 spin_unlock(&li->lock);
1777}
1778
Jens Freimann79e87a12015-03-19 15:12:12 +01001779static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
Jens Freimann383d0b02014-07-29 15:11:49 +02001780{
Jens Freimann383d0b02014-07-29 15:11:49 +02001781 int rc;
1782
Jens Freimann383d0b02014-07-29 15:11:49 +02001783 switch (irq->type) {
1784 case KVM_S390_PROGRAM_INT:
Jens Freimann383d0b02014-07-29 15:11:49 +02001785 rc = __inject_prog(vcpu, irq);
1786 break;
1787 case KVM_S390_SIGP_SET_PREFIX:
1788 rc = __inject_set_prefix(vcpu, irq);
1789 break;
1790 case KVM_S390_SIGP_STOP:
1791 rc = __inject_sigp_stop(vcpu, irq);
1792 break;
1793 case KVM_S390_RESTART:
1794 rc = __inject_sigp_restart(vcpu, irq);
1795 break;
1796 case KVM_S390_INT_CLOCK_COMP:
1797 rc = __inject_ckc(vcpu);
1798 break;
1799 case KVM_S390_INT_CPU_TIMER:
1800 rc = __inject_cpu_timer(vcpu);
1801 break;
1802 case KVM_S390_INT_EXTERNAL_CALL:
1803 rc = __inject_extcall(vcpu, irq);
1804 break;
1805 case KVM_S390_INT_EMERGENCY:
1806 rc = __inject_sigp_emergency(vcpu, irq);
1807 break;
1808 case KVM_S390_MCHK:
1809 rc = __inject_mchk(vcpu, irq);
Cornelia Huck48a3e952012-12-20 15:32:09 +01001810 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001811 case KVM_S390_INT_PFAULT_INIT:
Jens Freimann383d0b02014-07-29 15:11:49 +02001812 rc = __inject_pfault_init(vcpu, irq);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001813 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001814 case KVM_S390_INT_VIRTIO:
1815 case KVM_S390_INT_SERVICE:
Cornelia Huckd8346b72012-12-20 15:32:08 +01001816 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Carsten Otteba5c1e92008-03-25 18:47:26 +01001817 default:
Jens Freimann0146a7b2014-07-28 15:37:58 +02001818 rc = -EINVAL;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001819 }
Jens Freimann79e87a12015-03-19 15:12:12 +01001820
1821 return rc;
1822}
1823
1824int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1825{
1826 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1827 int rc;
1828
1829 spin_lock(&li->lock);
1830 rc = do_inject_vcpu(vcpu, irq);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001831 spin_unlock(&li->lock);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001832 if (!rc)
1833 kvm_s390_vcpu_wakeup(vcpu);
Jens Freimann0146a7b2014-07-28 15:37:58 +02001834 return rc;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001835}
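/*
 * Userspace-side sketch (hypothetical snippet, not part of this file):
 * local interrupts usually arrive here via the KVM_S390_IRQ vcpu ioctl,
 * which takes a struct kvm_s390_irq directly.  Injecting an emergency
 * signal from CPU address 0, assuming an open vcpu fd "vcpu_fd":
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 0,	// address of the signalling CPU
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		perror("KVM_S390_IRQ");
 */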
Jens Freimannc05c4182013-10-07 16:13:45 +02001836
Jens Freimann6d3da242013-07-03 15:18:35 +02001837static inline void clear_irq_list(struct list_head *_list)
Jens Freimannc05c4182013-10-07 16:13:45 +02001838{
Jens Freimann6d3da242013-07-03 15:18:35 +02001839 struct kvm_s390_interrupt_info *inti, *n;
Jens Freimannc05c4182013-10-07 16:13:45 +02001840
Jens Freimann6d3da242013-07-03 15:18:35 +02001841 list_for_each_entry_safe(inti, n, _list, list) {
Jens Freimannc05c4182013-10-07 16:13:45 +02001842 list_del(&inti->list);
1843 kfree(inti);
1844 }
Jens Freimannc05c4182013-10-07 16:13:45 +02001845}
1846
Jens Freimann94aa0332015-03-16 12:17:13 +01001847static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1848 struct kvm_s390_irq *irq)
Jens Freimannc05c4182013-10-07 16:13:45 +02001849{
Jens Freimann94aa0332015-03-16 12:17:13 +01001850 irq->type = inti->type;
Jens Freimannc05c4182013-10-07 16:13:45 +02001851 switch (inti->type) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02001852 case KVM_S390_INT_PFAULT_INIT:
1853 case KVM_S390_INT_PFAULT_DONE:
Jens Freimannc05c4182013-10-07 16:13:45 +02001854 case KVM_S390_INT_VIRTIO:
Jens Freimann94aa0332015-03-16 12:17:13 +01001855 irq->u.ext = inti->ext;
Jens Freimannc05c4182013-10-07 16:13:45 +02001856 break;
1857 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
Jens Freimann94aa0332015-03-16 12:17:13 +01001858 irq->u.io = inti->io;
Jens Freimannc05c4182013-10-07 16:13:45 +02001859 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02001860 }
Jens Freimannc05c4182013-10-07 16:13:45 +02001861}
1862
Jens Freimann6d3da242013-07-03 15:18:35 +02001863void kvm_s390_clear_float_irqs(struct kvm *kvm)
1864{
1865 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1866 int i;
1867
1868 spin_lock(&fi->lock);
Jens Freimannf2ae45e2015-06-22 13:20:12 +02001869 fi->pending_irqs = 0;
1870 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1871 memset(&fi->mchk, 0, sizeof(fi->mchk));
Jens Freimann6d3da242013-07-03 15:18:35 +02001872 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1873 clear_irq_list(&fi->lists[i]);
1874 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1875 fi->counters[i] = 0;
1876 spin_unlock(&fi->lock);
1877};
1878
Jens Freimann94aa0332015-03-16 12:17:13 +01001879static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
Jens Freimannc05c4182013-10-07 16:13:45 +02001880{
1881 struct kvm_s390_interrupt_info *inti;
1882 struct kvm_s390_float_interrupt *fi;
Jens Freimann94aa0332015-03-16 12:17:13 +01001883 struct kvm_s390_irq *buf;
Jens Freimann6d3da242013-07-03 15:18:35 +02001884 struct kvm_s390_irq *irq;
Jens Freimann94aa0332015-03-16 12:17:13 +01001885 int max_irqs;
Jens Freimannc05c4182013-10-07 16:13:45 +02001886 int ret = 0;
1887 int n = 0;
Jens Freimann6d3da242013-07-03 15:18:35 +02001888 int i;
Jens Freimannc05c4182013-10-07 16:13:45 +02001889
Jens Freimann94aa0332015-03-16 12:17:13 +01001890 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1891 return -EINVAL;
1892
1893 /*
 1894	 * We are already using -ENOMEM to signal
 1895	 * userspace that it may retry with a bigger buffer,
 1896	 * so we need to use something else for this case.
1897 */
1898 buf = vzalloc(len);
1899 if (!buf)
1900 return -ENOBUFS;
1901
1902 max_irqs = len / sizeof(struct kvm_s390_irq);
1903
Jens Freimannc05c4182013-10-07 16:13:45 +02001904 fi = &kvm->arch.float_int;
1905 spin_lock(&fi->lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001906 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1907 list_for_each_entry(inti, &fi->lists[i], list) {
1908 if (n == max_irqs) {
1909 /* signal userspace to try again */
1910 ret = -ENOMEM;
1911 goto out;
1912 }
1913 inti_to_irq(inti, &buf[n]);
1914 n++;
1915 }
1916 }
1917 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
Jens Freimann94aa0332015-03-16 12:17:13 +01001918 if (n == max_irqs) {
Jens Freimannc05c4182013-10-07 16:13:45 +02001919 /* signal userspace to try again */
1920 ret = -ENOMEM;
Jens Freimann6d3da242013-07-03 15:18:35 +02001921 goto out;
Jens Freimannc05c4182013-10-07 16:13:45 +02001922 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001923 irq = (struct kvm_s390_irq *) &buf[n];
1924 irq->type = KVM_S390_INT_SERVICE;
1925 irq->u.ext = fi->srv_signal;
Jens Freimannc05c4182013-10-07 16:13:45 +02001926 n++;
1927 }
Jens Freimann6d3da242013-07-03 15:18:35 +02001928 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1929 if (n == max_irqs) {
1930 /* signal userspace to try again */
1931 ret = -ENOMEM;
1932 goto out;
1933 }
1934 irq = (struct kvm_s390_irq *) &buf[n];
1935 irq->type = KVM_S390_MCHK;
1936 irq->u.mchk = fi->mchk;
1937 n++;
 1938	}
1939
1940out:
Jens Freimannc05c4182013-10-07 16:13:45 +02001941 spin_unlock(&fi->lock);
Jens Freimann94aa0332015-03-16 12:17:13 +01001942 if (!ret && n > 0) {
1943 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1944 ret = -EFAULT;
1945 }
1946 vfree(buf);
Jens Freimannc05c4182013-10-07 16:13:45 +02001947
1948 return ret < 0 ? ret : n;
1949}
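/*
 * Userspace-side sketch (hypothetical snippet, not part of this file):
 * get_all_floating_irqs() backs the KVM_DEV_FLIC_GET_ALL_IRQS group; on
 * success the call returns the number of interrupts copied, and -ENOMEM
 * tells the caller to retry with a bigger buffer ("flic_fd", "buf" and
 * "bufsize" are assumed caller context):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr  = bufsize,			// length in bytes
 *		.addr  = (__u64)(unsigned long)buf,
 *	};
 *	int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	if (n < 0 && errno == ENOMEM)
 *		;	// grow buf (up to KVM_S390_FLIC_MAX_BUFFER), retry
 */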
1950
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08001951static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
1952{
1953 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1954 struct kvm_s390_ais_all ais;
1955
1956 if (attr->attr < sizeof(ais))
1957 return -EINVAL;
1958
1959 if (!test_kvm_facility(kvm, 72))
1960 return -ENOTSUPP;
1961
1962 mutex_lock(&fi->ais_lock);
1963 ais.simm = fi->simm;
1964 ais.nimm = fi->nimm;
1965 mutex_unlock(&fi->ais_lock);
1966
1967 if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
1968 return -EFAULT;
1969
1970 return 0;
1971}
1972
Jens Freimannc05c4182013-10-07 16:13:45 +02001973static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1974{
1975 int r;
1976
1977 switch (attr->group) {
1978 case KVM_DEV_FLIC_GET_ALL_IRQS:
Jens Freimann94aa0332015-03-16 12:17:13 +01001979 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
Jens Freimannc05c4182013-10-07 16:13:45 +02001980 attr->attr);
1981 break;
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08001982 case KVM_DEV_FLIC_AISM_ALL:
1983 r = flic_ais_mode_get_all(dev->kvm, attr);
1984 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02001985 default:
1986 r = -EINVAL;
1987 }
1988
1989 return r;
1990}
1991
1992static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1993 u64 addr)
1994{
1995 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1996 void *target = NULL;
1997 void __user *source;
1998 u64 size;
1999
2000 if (get_user(inti->type, (u64 __user *)addr))
2001 return -EFAULT;
2002
2003 switch (inti->type) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002004 case KVM_S390_INT_PFAULT_INIT:
2005 case KVM_S390_INT_PFAULT_DONE:
Jens Freimannc05c4182013-10-07 16:13:45 +02002006 case KVM_S390_INT_VIRTIO:
2007 case KVM_S390_INT_SERVICE:
2008 target = (void *) &inti->ext;
2009 source = &uptr->u.ext;
2010 size = sizeof(inti->ext);
2011 break;
2012 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2013 target = (void *) &inti->io;
2014 source = &uptr->u.io;
2015 size = sizeof(inti->io);
2016 break;
2017 case KVM_S390_MCHK:
2018 target = (void *) &inti->mchk;
2019 source = &uptr->u.mchk;
2020 size = sizeof(inti->mchk);
2021 break;
2022 default:
2023 return -EINVAL;
2024 }
2025
2026 if (copy_from_user(target, source, size))
2027 return -EFAULT;
2028
2029 return 0;
2030}
2031
2032static int enqueue_floating_irq(struct kvm_device *dev,
2033 struct kvm_device_attr *attr)
2034{
2035 struct kvm_s390_interrupt_info *inti = NULL;
2036 int r = 0;
2037 int len = attr->attr;
2038
2039 if (len % sizeof(struct kvm_s390_irq) != 0)
2040 return -EINVAL;
2041 else if (len > KVM_S390_FLIC_MAX_BUFFER)
2042 return -EINVAL;
2043
2044 while (len >= sizeof(struct kvm_s390_irq)) {
2045 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2046 if (!inti)
2047 return -ENOMEM;
2048
2049 r = copy_irq_from_user(inti, attr->addr);
2050 if (r) {
2051 kfree(inti);
2052 return r;
2053 }
Jens Freimanna91b8eb2014-01-30 08:40:23 +01002054 r = __inject_vm(dev->kvm, inti);
2055 if (r) {
2056 kfree(inti);
2057 return r;
2058 }
Jens Freimannc05c4182013-10-07 16:13:45 +02002059 len -= sizeof(struct kvm_s390_irq);
2060 attr->addr += sizeof(struct kvm_s390_irq);
2061 }
2062
2063 return r;
2064}
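/*
 * Userspace-side sketch (hypothetical snippet, not part of this file):
 * enqueue_floating_irq() consumes the KVM_DEV_FLIC_ENQUEUE group, which
 * takes an array of struct kvm_s390_irq plus its total size in bytes
 * ("flic_fd" is an assumed, already created flic device fd):
 *
 *	struct kvm_s390_irq irqs[1] = {
 *		{ .type = KVM_S390_INT_VIRTIO,
 *		  .u.ext.ext_params2 = 0 },	// payload as the transport defines
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = sizeof(irqs),
 *		.addr  = (__u64)(unsigned long)irqs,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */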
2065
Cornelia Huck841b91c2013-07-15 13:36:01 +02002066static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2067{
2068 if (id >= MAX_S390_IO_ADAPTERS)
2069 return NULL;
2070 return kvm->arch.adapters[id];
2071}
2072
2073static int register_io_adapter(struct kvm_device *dev,
2074 struct kvm_device_attr *attr)
2075{
2076 struct s390_io_adapter *adapter;
2077 struct kvm_s390_io_adapter adapter_info;
2078
2079 if (copy_from_user(&adapter_info,
2080 (void __user *)attr->addr, sizeof(adapter_info)))
2081 return -EFAULT;
2082
2083 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
2084 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
2085 return -EINVAL;
2086
2087 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2088 if (!adapter)
2089 return -ENOMEM;
2090
2091 INIT_LIST_HEAD(&adapter->maps);
2092 init_rwsem(&adapter->maps_lock);
2093 atomic_set(&adapter->nr_maps, 0);
2094 adapter->id = adapter_info.id;
2095 adapter->isc = adapter_info.isc;
2096 adapter->maskable = adapter_info.maskable;
2097 adapter->masked = false;
2098 adapter->swap = adapter_info.swap;
Fei Li08fab502017-01-19 17:02:26 +01002099 adapter->suppressible = (adapter_info.flags) &
2100 KVM_S390_ADAPTER_SUPPRESSIBLE;
Cornelia Huck841b91c2013-07-15 13:36:01 +02002101 dev->kvm->arch.adapters[adapter->id] = adapter;
2102
2103 return 0;
2104}
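/*
 * Userspace-side sketch (hypothetical snippet, not part of this file):
 * adapters are registered through the KVM_DEV_FLIC_ADAPTER_REGISTER group
 * with a struct kvm_s390_io_adapter describing the adapter ("flic_fd" is
 * an assumed flic device fd; the field values are arbitrary examples):
 *
 *	struct kvm_s390_io_adapter info = {
 *		.id = 0, .isc = 3, .maskable = 1, .swap = 0,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr  = (__u64)(unsigned long)&info,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */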
2105
2106int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2107{
2108 int ret;
2109 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2110
2111 if (!adapter || !adapter->maskable)
2112 return -EINVAL;
2113 ret = adapter->masked;
2114 adapter->masked = masked;
2115 return ret;
2116}
2117
2118static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2119{
2120 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2121 struct s390_map_info *map;
2122 int ret;
2123
2124 if (!adapter || !addr)
2125 return -EINVAL;
2126
2127 map = kzalloc(sizeof(*map), GFP_KERNEL);
2128 if (!map) {
2129 ret = -ENOMEM;
2130 goto out;
2131 }
2132 INIT_LIST_HEAD(&map->list);
2133 map->guest_addr = addr;
Martin Schwidefsky6e0a0432014-04-29 09:34:41 +02002134 map->addr = gmap_translate(kvm->arch.gmap, addr);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002135 if (map->addr == -EFAULT) {
2136 ret = -EFAULT;
2137 goto out;
2138 }
2139 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2140 if (ret < 0)
2141 goto out;
2142 BUG_ON(ret != 1);
2143 down_write(&adapter->maps_lock);
2144 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2145 list_add_tail(&map->list, &adapter->maps);
2146 ret = 0;
2147 } else {
2148 put_page(map->page);
2149 ret = -EINVAL;
2150 }
2151 up_write(&adapter->maps_lock);
2152out:
2153 if (ret)
2154 kfree(map);
2155 return ret;
2156}
2157
2158static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2159{
2160 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2161 struct s390_map_info *map, *tmp;
2162 int found = 0;
2163
2164 if (!adapter || !addr)
2165 return -EINVAL;
2166
2167 down_write(&adapter->maps_lock);
2168 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2169 if (map->guest_addr == addr) {
2170 found = 1;
2171 atomic_dec(&adapter->nr_maps);
2172 list_del(&map->list);
2173 put_page(map->page);
2174 kfree(map);
2175 break;
2176 }
2177 }
2178 up_write(&adapter->maps_lock);
2179
2180 return found ? 0 : -EINVAL;
2181}
2182
2183void kvm_s390_destroy_adapters(struct kvm *kvm)
2184{
2185 int i;
2186 struct s390_map_info *map, *tmp;
2187
2188 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2189 if (!kvm->arch.adapters[i])
2190 continue;
2191 list_for_each_entry_safe(map, tmp,
2192 &kvm->arch.adapters[i]->maps, list) {
2193 list_del(&map->list);
2194 put_page(map->page);
2195 kfree(map);
2196 }
2197 kfree(kvm->arch.adapters[i]);
2198 }
2199}
2200
2201static int modify_io_adapter(struct kvm_device *dev,
2202 struct kvm_device_attr *attr)
2203{
2204 struct kvm_s390_io_adapter_req req;
2205 struct s390_io_adapter *adapter;
2206 int ret;
2207
2208 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2209 return -EFAULT;
2210
2211 adapter = get_io_adapter(dev->kvm, req.id);
2212 if (!adapter)
2213 return -EINVAL;
2214 switch (req.type) {
2215 case KVM_S390_IO_ADAPTER_MASK:
2216 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2217 if (ret > 0)
2218 ret = 0;
2219 break;
2220 case KVM_S390_IO_ADAPTER_MAP:
2221 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2222 break;
2223 case KVM_S390_IO_ADAPTER_UNMAP:
2224 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2225 break;
2226 default:
2227 ret = -EINVAL;
2228 }
2229
2230 return ret;
2231}
2232
Halil Pasic6d28f782016-01-25 19:10:40 +01002233static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2234
2235{
2236 const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2237 u32 schid;
2238
2239 if (attr->flags)
2240 return -EINVAL;
2241 if (attr->attr != sizeof(schid))
2242 return -EINVAL;
2243 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2244 return -EFAULT;
Michael Mueller4dd6f172017-07-06 14:22:20 +02002245 if (!schid)
2246 return -EINVAL;
Halil Pasic6d28f782016-01-25 19:10:40 +01002247 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2248 /*
2249 * If userspace is conforming to the architecture, we can have at most
2250 * one pending I/O interrupt per subchannel, so this is effectively a
2251 * clear all.
2252 */
2253 return 0;
2254}
2255
Fei Li51978392017-02-17 17:06:26 +08002256static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2257{
2258 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2259 struct kvm_s390_ais_req req;
2260 int ret = 0;
2261
Christian Borntraeger1ba15b22017-05-31 10:18:55 +02002262 if (!test_kvm_facility(kvm, 72))
Fei Li51978392017-02-17 17:06:26 +08002263 return -ENOTSUPP;
2264
2265 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2266 return -EFAULT;
2267
2268 if (req.isc > MAX_ISC)
2269 return -EINVAL;
2270
2271 trace_kvm_s390_modify_ais_mode(req.isc,
2272 (fi->simm & AIS_MODE_MASK(req.isc)) ?
2273 (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2274 2 : KVM_S390_AIS_MODE_SINGLE :
2275 KVM_S390_AIS_MODE_ALL, req.mode);
2276
2277 mutex_lock(&fi->ais_lock);
2278 switch (req.mode) {
2279 case KVM_S390_AIS_MODE_ALL:
2280 fi->simm &= ~AIS_MODE_MASK(req.isc);
2281 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2282 break;
2283 case KVM_S390_AIS_MODE_SINGLE:
2284 fi->simm |= AIS_MODE_MASK(req.isc);
2285 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2286 break;
2287 default:
2288 ret = -EINVAL;
2289 }
2290 mutex_unlock(&fi->ais_lock);
2291
2292 return ret;
2293}
2294
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002295static int kvm_s390_inject_airq(struct kvm *kvm,
2296 struct s390_io_adapter *adapter)
2297{
2298 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2299 struct kvm_s390_interrupt s390int = {
2300 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2301 .parm = 0,
2302 .parm64 = (adapter->isc << 27) | 0x80000000,
2303 };
2304 int ret = 0;
2305
Christian Borntraeger1ba15b22017-05-31 10:18:55 +02002306 if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002307 return kvm_s390_inject_vm(kvm, &s390int);
2308
2309 mutex_lock(&fi->ais_lock);
2310 if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2311 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2312 goto out;
2313 }
2314
2315 ret = kvm_s390_inject_vm(kvm, &s390int);
2316 if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2317 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2318 trace_kvm_s390_modify_ais_mode(adapter->isc,
2319 KVM_S390_AIS_MODE_SINGLE, 2);
2320 }
2321out:
2322 mutex_unlock(&fi->ais_lock);
2323 return ret;
2324}
2325
2326static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2327{
2328 unsigned int id = attr->attr;
2329 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2330
2331 if (!adapter)
2332 return -EINVAL;
2333
2334 return kvm_s390_inject_airq(kvm, adapter);
2335}
2336
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002337static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2338{
2339 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2340 struct kvm_s390_ais_all ais;
2341
2342 if (!test_kvm_facility(kvm, 72))
2343 return -ENOTSUPP;
2344
2345 if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2346 return -EFAULT;
2347
2348 mutex_lock(&fi->ais_lock);
2349 fi->simm = ais.simm;
2350 fi->nimm = ais.nimm;
2351 mutex_unlock(&fi->ais_lock);
2352
2353 return 0;
2354}
2355
Jens Freimannc05c4182013-10-07 16:13:45 +02002356static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2357{
2358 int r = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002359 unsigned int i;
2360 struct kvm_vcpu *vcpu;
Jens Freimannc05c4182013-10-07 16:13:45 +02002361
2362 switch (attr->group) {
2363 case KVM_DEV_FLIC_ENQUEUE:
2364 r = enqueue_floating_irq(dev, attr);
2365 break;
2366 case KVM_DEV_FLIC_CLEAR_IRQS:
Christian Borntraeger67335e62014-03-25 17:09:08 +01002367 kvm_s390_clear_float_irqs(dev->kvm);
Jens Freimannc05c4182013-10-07 16:13:45 +02002368 break;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002369 case KVM_DEV_FLIC_APF_ENABLE:
2370 dev->kvm->arch.gmap->pfault_enabled = 1;
2371 break;
2372 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2373 dev->kvm->arch.gmap->pfault_enabled = 0;
2374 /*
 2375	 * Make sure no async faults are in transition when
 2376	 * clearing the queues, so we don't need to worry
 2377	 * about late-coming workers.
2378 */
2379 synchronize_srcu(&dev->kvm->srcu);
2380 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2381 kvm_clear_async_pf_completion_queue(vcpu);
2382 break;
Cornelia Huck841b91c2013-07-15 13:36:01 +02002383 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2384 r = register_io_adapter(dev, attr);
2385 break;
2386 case KVM_DEV_FLIC_ADAPTER_MODIFY:
2387 r = modify_io_adapter(dev, attr);
2388 break;
Halil Pasic6d28f782016-01-25 19:10:40 +01002389 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2390 r = clear_io_irq(dev->kvm, attr);
2391 break;
Fei Li51978392017-02-17 17:06:26 +08002392 case KVM_DEV_FLIC_AISM:
2393 r = modify_ais_mode(dev->kvm, attr);
2394 break;
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002395 case KVM_DEV_FLIC_AIRQ_INJECT:
2396 r = flic_inject_airq(dev->kvm, attr);
2397 break;
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002398 case KVM_DEV_FLIC_AISM_ALL:
2399 r = flic_ais_mode_set_all(dev->kvm, attr);
2400 break;
Jens Freimannc05c4182013-10-07 16:13:45 +02002401 default:
2402 r = -EINVAL;
2403 }
2404
2405 return r;
2406}
2407
Halil Pasic4f129852016-02-25 12:44:17 +01002408static int flic_has_attr(struct kvm_device *dev,
2409 struct kvm_device_attr *attr)
2410{
2411 switch (attr->group) {
2412 case KVM_DEV_FLIC_GET_ALL_IRQS:
2413 case KVM_DEV_FLIC_ENQUEUE:
2414 case KVM_DEV_FLIC_CLEAR_IRQS:
2415 case KVM_DEV_FLIC_APF_ENABLE:
2416 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2417 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2418 case KVM_DEV_FLIC_ADAPTER_MODIFY:
Halil Pasic6d28f782016-01-25 19:10:40 +01002419 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
Fei Li51978392017-02-17 17:06:26 +08002420 case KVM_DEV_FLIC_AISM:
Yi Min Zhaoa8920952017-02-20 10:15:01 +08002421 case KVM_DEV_FLIC_AIRQ_INJECT:
Yi Min Zhao2c1a48f2017-06-07 16:09:52 +08002422 case KVM_DEV_FLIC_AISM_ALL:
Halil Pasic4f129852016-02-25 12:44:17 +01002423 return 0;
2424 }
2425 return -ENXIO;
2426}
2427
Jens Freimannc05c4182013-10-07 16:13:45 +02002428static int flic_create(struct kvm_device *dev, u32 type)
2429{
2430 if (!dev)
2431 return -EINVAL;
2432 if (dev->kvm->arch.flic)
2433 return -EINVAL;
2434 dev->kvm->arch.flic = dev;
2435 return 0;
2436}
2437
2438static void flic_destroy(struct kvm_device *dev)
2439{
2440 dev->kvm->arch.flic = NULL;
2441 kfree(dev);
2442}
2443
2444/* s390 floating irq controller (flic) */
2445struct kvm_device_ops kvm_flic_ops = {
2446 .name = "kvm-flic",
2447 .get_attr = flic_get_attr,
2448 .set_attr = flic_set_attr,
Halil Pasic4f129852016-02-25 12:44:17 +01002449 .has_attr = flic_has_attr,
Jens Freimannc05c4182013-10-07 16:13:45 +02002450 .create = flic_create,
2451 .destroy = flic_destroy,
2452};
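/*
 * Userspace-side sketch (hypothetical snippet, not part of this file): the
 * flic itself is instantiated with KVM_CREATE_DEVICE before any of the
 * attribute groups above can be used ("vm_fd" is an assumed open VM fd):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;	// pass to KVM_SET/GET_DEVICE_ATTR
 */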
Cornelia Huck84223592013-07-15 13:36:01 +02002453
2454static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2455{
2456 unsigned long bit;
2457
2458 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2459
2460 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2461}
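/*
 * Worked example (illustration only): for swapped indicator bits
 * (adapter->swap set) on a 64-bit host, guest bit_nr 2 at page offset 0
 * becomes 2 + 0 = 2, then 2 XOR (BITS_PER_LONG - 1) = 2 XOR 63 = 61, i.e.
 * the third bit from the most significant end of the first unsigned long,
 * matching the guest's MSB-first bit numbering.
 */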
2462
2463static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2464 u64 addr)
2465{
2466 struct s390_map_info *map;
2467
2468 if (!adapter)
2469 return NULL;
2470
2471 list_for_each_entry(map, &adapter->maps, list) {
2472 if (map->guest_addr == addr)
2473 return map;
2474 }
2475 return NULL;
2476}
2477
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

/*
 * Reinject the machine check into the guest: as a floating interrupt
 * when a channel report is pending (mci.ck), otherwise as a local
 * interrupt for the given vcpu.
 */
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info)
{
	struct kvm_s390_interrupt_info inti;
	struct kvm_s390_irq irq;
	struct kvm_s390_mchk_info *mchk;
	union mci mci;
	__u64 cr14 = 0; /* upper bits are not used */
	int rc;

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
	mchk->mcic = mcck_info->mcic;
	mchk->ext_damage_code = mcck_info->ext_damage_code;
	mchk->failing_storage_address = mcck_info->failing_storage_address;
	if (mci.ck) {
		/* Inject the floating machine check */
		inti.type = KVM_S390_MCHK;
		rc = __inject_vm(vcpu->kvm, &inti);
	} else {
		/* Inject the machine check to the specified vcpu */
		irq.type = KVM_S390_MCHK;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
	}
	WARN_ON_ONCE(rc);
}

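/*
 * Translate a userspace irq routing entry into its in-kernel
 * representation; only adapter interrupts are supported on s390.
 */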
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

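/* MSI routing entries are not supported on s390. */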
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}

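/*
 * Restore the local interrupt state of a vcpu from a userspace buffer
 * of struct kvm_s390_irq entries. Fails with -EBUSY if the vcpu
 * already has interrupts pending.
 */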
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq *buf;
	int r = 0;
	int n;

	buf = vmalloc(len);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user((void *) buf, irqstate, len)) {
		r = -EFAULT;
		goto out_free;
	}

	/*
	 * Don't allow setting the interrupt state
	 * when there are already interrupts pending
	 */
	spin_lock(&li->lock);
	if (li->pending_irqs) {
		r = -EBUSY;
		goto out_unlock;
	}

	for (n = 0; n < len / sizeof(*buf); n++) {
		r = do_inject_vcpu(vcpu, &buf[n]);
		if (r)
			break;
	}

out_unlock:
	spin_unlock(&li->lock);
out_free:
	vfree(buf);

	return r;
}

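/* Translate one pending local interrupt into the userspace irq format. */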
static void store_local_irq(struct kvm_s390_local_interrupt *li,
			    struct kvm_s390_irq *irq,
			    unsigned long irq_type)
{
	switch (irq_type) {
	case IRQ_PEND_MCHK_EX:
	case IRQ_PEND_MCHK_REP:
		irq->type = KVM_S390_MCHK;
		irq->u.mchk = li->irq.mchk;
		break;
	case IRQ_PEND_PROG:
		irq->type = KVM_S390_PROGRAM_INT;
		irq->u.pgm = li->irq.pgm;
		break;
	case IRQ_PEND_PFAULT_INIT:
		irq->type = KVM_S390_INT_PFAULT_INIT;
		irq->u.ext = li->irq.ext;
		break;
	case IRQ_PEND_EXT_EXTERNAL:
		irq->type = KVM_S390_INT_EXTERNAL_CALL;
		irq->u.extcall = li->irq.extcall;
		break;
	case IRQ_PEND_EXT_CLOCK_COMP:
		irq->type = KVM_S390_INT_CLOCK_COMP;
		break;
	case IRQ_PEND_EXT_CPU_TIMER:
		irq->type = KVM_S390_INT_CPU_TIMER;
		break;
	case IRQ_PEND_SIGP_STOP:
		irq->type = KVM_S390_SIGP_STOP;
		irq->u.stop = li->irq.stop;
		break;
	case IRQ_PEND_RESTART:
		irq->type = KVM_S390_RESTART;
		break;
	case IRQ_PEND_SET_PREFIX:
		irq->type = KVM_S390_SIGP_SET_PREFIX;
		irq->u.prefix = li->irq.prefix;
		break;
	}
}

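/*
 * Copy the vcpu's pending local interrupts, pending SIGP emergency
 * signals and a pending SIGP external call (taken from the SCA) to the
 * userspace buffer. Returns the number of bytes written, -ENOBUFS if
 * the buffer is too small or -EFAULT on a copy error.
 */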
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
	int scn;
	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	unsigned long pending_irqs;
	struct kvm_s390_irq irq;
	unsigned long irq_type;
	int cpuaddr;
	int n = 0;

	spin_lock(&li->lock);
	pending_irqs = li->pending_irqs;
	memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
	       sizeof(sigp_emerg_pending));
	spin_unlock(&li->lock);

	for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
		memset(&irq, 0, sizeof(irq));
		if (irq_type == IRQ_PEND_EXT_EMERGENCY)
			continue;
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
		for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
			memset(&irq, 0, sizeof(irq));
			if (n + sizeof(irq) > len)
				return -ENOBUFS;
			irq.type = KVM_S390_INT_EMERGENCY;
			irq.u.emerg.code = cpuaddr;
			if (copy_to_user(&buf[n], &irq, sizeof(irq)))
				return -EFAULT;
			n += sizeof(irq);
		}
	}

	if (sca_ext_call_pending(vcpu, &scn)) {
		if (n + sizeof(irq) > len)
			return -ENOBUFS;
		memset(&irq, 0, sizeof(irq));
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = scn;
		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
			return -EFAULT;
		n += sizeof(irq);
	}

	return n;
}

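/*
 * Clear the guest interruption state area (GISA) and reinitialize its
 * next_alert field to point back to the GISA itself.
 */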
void kvm_s390_gisa_clear(struct kvm *kvm)
{
	if (kvm->arch.gisa) {
		memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
		kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
		VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
	}
}

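/* Placeholder until GISA allocation and setup are wired up. */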
void kvm_s390_gisa_init(struct kvm *kvm)
{
	/* not implemented yet */
}

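/* Detach the GISA from the VM; nothing is freed here yet. */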
void kvm_s390_gisa_destroy(struct kvm *kvm)
{
	if (!kvm->arch.gisa)
		return;
	kvm->arch.gisa = NULL;
}