/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
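/*
 * Worst-case buffer size for the KVM_S390_{GET,SET}_IRQ_STATE vcpu
 * ioctls: one entry per VCPU that could be signalling us, plus up to
 * LOCAL_IRQS further local interrupts.
 */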

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
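/*
 * Each entry above becomes a file in the kvm debugfs directory
 * (/sys/kernel/debug/kvm/ by default).
 */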

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
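/*
 * Registered on the s390_epoch_delta_notifier chain in
 * kvm_arch_hardware_setup() and removed in kvm_arch_hardware_unsetup().
 */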

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

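/*
 * Probe one PERFORM LOCKED OPERATION (PLO) subfunction: setting bit
 * 0x100 in r0 turns the call into a "test bit" query for function code
 * @nr, which leaves condition code 0 if the subfunction is available.
 */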
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

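/*
 * Determine everything the host can offer to guests: all 256 possible
 * PLO subfunctions, the PTFF and CPACF query masks (each guarded by its
 * facility bit) and, when the "nested" module parameter is set, the SIE
 * features that can safely be passed through for nested virtualization.
 */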
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

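/*
 * Transfer the per-page guest-dirty state tracked in the host page
 * tables into the memslot's dirty bitmap. The scan may cover a lot of
 * pages, hence the cond_resched() and the bail-out on fatal signals.
 */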
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

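/*
 * Capabilities that extend the guest CPU model (vector registers, RI,
 * AIS, GS) may only be enabled before the first VCPU exists; the
 * created_vcpus/online_vcpus checks below enforce that under kvm->lock.
 *
 * Userspace sketch (illustrative only):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */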
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			kvm->arch.float_int.ais_enabled = 1;
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

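/*
 * Key wrapping is toggled VM-wide: the wrapping key masks in the shared
 * crycb are regenerated or cleared, then every VCPU has its crypto
 * setup refreshed and is kicked out of SIE so the change takes effect
 * immediately.
 */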
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

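/*
 * When userspace sets the processor model, the requested IBC value is
 * clamped to the [lowest_ibc, unblocked_ibc] range reported by the SCLP.
 */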
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

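/*
 * Export one storage key per guest page. The key buffer is kmalloc'ed
 * first and falls back to vmalloc for large counts.
 *
 * Userspace sketch (illustrative only):
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = npages,	(at most KVM_S390_SKEYS_MAX)
 *		.skeydata_addr = (__u64) keybuf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */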
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001186static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1187{
1188 uint8_t *keys;
1189 uint64_t hva;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001190 int i, r = 0;
1191
1192 if (args->flags != 0)
1193 return -EINVAL;
1194
1195 /* Is this guest using storage keys? */
1196 if (!mm_use_skey(current->mm))
1197 return KVM_S390_GET_SKEYS_NONE;
1198
1199 /* Enforce sane limit on memory allocation */
1200 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1201 return -EINVAL;
1202
1203 keys = kmalloc_array(args->count, sizeof(uint8_t),
1204 GFP_KERNEL | __GFP_NOWARN);
1205 if (!keys)
1206 keys = vmalloc(sizeof(uint8_t) * args->count);
1207 if (!keys)
1208 return -ENOMEM;
1209
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001210 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001211 for (i = 0; i < args->count; i++) {
1212 hva = gfn_to_hva(kvm, args->start_gfn + i);
1213 if (kvm_is_error_hva(hva)) {
1214 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001215 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001216 }
1217
David Hildenbrand154c8c12016-05-09 11:22:34 +02001218 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1219 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001220 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001221 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001222 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001223
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001224 if (!r) {
1225 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1226 sizeof(uint8_t) * args->count);
1227 if (r)
1228 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001229 }
1230
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001231 kvfree(keys);
1232 return r;
1233}
1234
1235static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1236{
1237 uint8_t *keys;
1238 uint64_t hva;
1239 int i, r = 0;
1240
1241 if (args->flags != 0)
1242 return -EINVAL;
1243
1244 /* Enforce sane limit on memory allocation */
1245 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1246 return -EINVAL;
1247
1248 keys = kmalloc_array(args->count, sizeof(uint8_t),
1249 GFP_KERNEL | __GFP_NOWARN);
1250 if (!keys)
1251 keys = vmalloc(sizeof(uint8_t) * args->count);
1252 if (!keys)
1253 return -ENOMEM;
1254
1255 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1256 sizeof(uint8_t) * args->count);
1257 if (r) {
1258 r = -EFAULT;
1259 goto out;
1260 }
1261
1262 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001263 r = s390_enable_skey();
1264 if (r)
1265 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001266
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001267 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001268 for (i = 0; i < args->count; i++) {
1269 hva = gfn_to_hva(kvm, args->start_gfn + i);
1270 if (kvm_is_error_hva(hva)) {
1271 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001272 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001273 }
1274
1275 /* Lowest order bit is reserved */
1276 if (keys[i] & 0x01) {
1277 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001278 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001279 }
1280
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001281 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001282 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001283 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001284 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001285 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001286out:
1287 kvfree(keys);
1288 return r;
1289}
1290
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

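/*
 * Query the Adjunct Processor (AP) configuration with PQAP(QCI): the
 * function code is loaded into register 0, the address of the 128 byte
 * config area into register 2, and the condition code is extracted via
 * ipm/srl. The exception table entry protects against machines where
 * the instruction would fault.
 */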
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d\n", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

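/*
 * Pick the crypto control block format: format 2 if the AP extended
 * addressing (APXA) facility is installed, format 1 otherwise.
 */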
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

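/*
 * Create a new VM: allocate the SCA (staggered within its page), the
 * debug feature area and the second SIE page, populate the facility
 * mask and list, initialize crypto and interrupt state, and set up the
 * guest address space (gmap) unless this is a ucontrol VM.
 */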
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	kvm->arch.float_int.ais_enabled = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

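/*
 * sca_del_vcpu/sca_add_vcpu maintain the VCPU's entry in the system
 * control area (SCA): the MCN bit and the SIE block address (SDA).
 * Without SIGP interpretation the entries themselves stay unused, but
 * the basic SCA is still wired up for the ipte control word.
 */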
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

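/*
 * Convert the basic SCA to the extended SCA, which has room for more
 * VCPUs. All VCPUs are blocked and the sca_lock is taken for writing
 * while the entries are copied and every SIE block is rewired to the
 * new origin.
 */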
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

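/*
 * First stage of VCPU initialization: announce which register sets are
 * synchronized via kvm_run, depending on the available facilities, and
 * create a private gmap for ucontrol VMs.
 */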
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

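/*
 * kvm_arch_vcpu_load/put: (dis)connect the VCPU from this physical CPU.
 * The enabled gmap is switched, and CPU timer accounting is started or
 * stopped for a non-idle VCPU together with the CPUSTAT_RUNNING flag.
 */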
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

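/*
 * Called once the VCPU exists: synchronize the epoch with the rest of
 * the VM (with preemption disabled, to not race with TOD updates), hook
 * the VCPU into the SCA for non-ucontrol VMs, and enable operation
 * exception forwarding where needed.
 */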
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

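/*
 * Final VCPU setup: derive the SIE control block flags (ecb/ecb2/eca)
 * from the facilities available to the guest, and wire up the CPU
 * model, CMMA, the clock comparator timer and crypto.
 */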
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

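/*
 * Allocate and initialize a new VCPU: the SIE control block lives in a
 * zeroed page together with the interception transaction diagnostic
 * block (itdb).
 */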
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

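/*
 * ONE_REG access to the named s390 registers; each register is read
 * from or written to its home in the SIE block or the arch struct.
 */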
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

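/*
 * Enable or disable guest debugging. Debugging relies on the guest-PER
 * facility; while debugging is active, PER is forced on in the SIE
 * block via CPUSTAT_P.
 */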
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002369int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2370 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002371{
David Hildenbrand27291e22014-01-23 12:26:52 +01002372 int rc = 0;
2373
2374 vcpu->guest_debug = 0;
2375 kvm_s390_clear_bp_data(vcpu);
2376
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002377 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002378 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002379 if (!sclp.has_gpere)
2380 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002381
2382 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2383 vcpu->guest_debug = dbg->control;
2384 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002385 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002386
2387 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2388 rc = kvm_s390_import_bp_data(vcpu, dbg);
2389 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002390 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002391 vcpu->arch.guestdbg.last_bp = 0;
2392 }
2393
2394 if (rc) {
2395 vcpu->guest_debug = 0;
2396 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002397 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002398 }
2399
2400 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002401}
2402
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002403int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2404 struct kvm_mp_state *mp_state)
2405{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002406 /* CHECK_STOP and LOAD are not supported yet */
2407 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2408 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002409}
2410
2411int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2412 struct kvm_mp_state *mp_state)
2413{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002414 int rc = 0;
2415
2416 /* user space knows about this interface - let it control the state */
2417 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2418
2419 switch (mp_state->mp_state) {
2420 case KVM_MP_STATE_STOPPED:
2421 kvm_s390_vcpu_stop(vcpu);
2422 break;
2423 case KVM_MP_STATE_OPERATING:
2424 kvm_s390_vcpu_start(vcpu);
2425 break;
2426 case KVM_MP_STATE_LOAD:
2427 case KVM_MP_STATE_CHECK_STOP:
2428 /* fall through - CHECK_STOP and LOAD are not supported yet */
2429 default:
2430 rc = -ENXIO;
2431 }
2432
2433 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002434}
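/*
 * Illustration only (hedged sketch, not part of this file): restarting a
 * stopped vcpu from userspace via KVM_SET_MP_STATE. Note that, as the code
 * above documents, the first use of this interface switches the VM to
 * user-controlled cpu state handling.
 */
#if 0
static int restart_vcpu(int vcpu_fd)
{
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_OPERATING };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}
#endif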
2435
David Hildenbrand8ad35752014-03-14 11:00:21 +01002436static bool ibs_enabled(struct kvm_vcpu *vcpu)
2437{
2438 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2439}
2440
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002441static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2442{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002443retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002444 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002445 if (!vcpu->requests)
2446 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002447 /*
2448 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002449 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002450 * This ensures that the ipte instruction for this request has
2451 * already finished. We might race against a second unmapper that
2452	 * wants to set the blocking bit. Let's just retry the request loop.
2453 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002454 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002455 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002456 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2457 kvm_s390_get_prefix(vcpu),
2458 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002459 if (rc) {
2460 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002461 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002462 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002463 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002464 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002465
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002466 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2467 vcpu->arch.sie_block->ihcpu = 0xffff;
2468 goto retry;
2469 }
2470
David Hildenbrand8ad35752014-03-14 11:00:21 +01002471 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2472 if (!ibs_enabled(vcpu)) {
2473 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002474 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002475 &vcpu->arch.sie_block->cpuflags);
2476 }
2477 goto retry;
2478 }
2479
2480 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2481 if (ibs_enabled(vcpu)) {
2482 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002483 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002484 &vcpu->arch.sie_block->cpuflags);
2485 }
2486 goto retry;
2487 }
2488
David Hildenbrand6502a342016-06-21 14:19:51 +02002489 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2490 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2491 goto retry;
2492 }
2493
David Hildenbrand0759d062014-05-13 16:54:32 +02002494 /* nothing to do, just clear the request */
2495 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2496
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002497 return 0;
2498}
2499
David Hildenbrand25ed1672015-05-12 09:49:14 +02002500void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2501{
2502 struct kvm_vcpu *vcpu;
2503 int i;
2504
2505 mutex_lock(&kvm->lock);
2506 preempt_disable();
2507 kvm->arch.epoch = tod - get_tod_clock();
2508 kvm_s390_vcpu_block_all(kvm);
2509 kvm_for_each_vcpu(i, vcpu, kvm)
2510 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2511 kvm_s390_vcpu_unblock_all(kvm);
2512 preempt_enable();
2513 mutex_unlock(&kvm->lock);
2514}
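/*
 * Illustration only (hedged sketch, not part of this file): the epoch set
 * above is the signed difference between the requested guest TOD and the
 * host TOD, so guest_tod = host_tod + epoch from then on. Userspace reaches
 * this function through the TOD vm device attribute; the group/attr
 * constants below are taken from the s390 uapi headers, and vm_fd is a
 * hypothetical open VM file descriptor.
 */
#if 0
static int set_guest_tod(int vm_fd, __u64 tod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_LOW,
		.addr  = (__u64)(unsigned long)&tod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif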
2515
Thomas Huthfa576c52014-05-06 17:20:16 +02002516/**
2517 * kvm_arch_fault_in_page - fault-in guest page if necessary
2518 * @vcpu: The corresponding virtual cpu
2519 * @gpa: Guest physical address
2520 * @writable: Whether the page should be writable or not
2521 *
2522 * Make sure that a guest page has been faulted-in on the host.
2523 *
2524 * Return: Zero on success, negative error code otherwise.
2525 */
2526long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002527{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002528 return gmap_fault(vcpu->arch.gmap, gpa,
2529 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002530}
2531
Dominik Dingel3c038e62013-10-07 17:11:48 +02002532static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2533 unsigned long token)
2534{
2535 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002536 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002537
2538 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002539 irq.u.ext.ext_params2 = token;
2540 irq.type = KVM_S390_INT_PFAULT_INIT;
2541 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002542 } else {
2543 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002544 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002545 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2546 }
2547}
2548
2549void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2550 struct kvm_async_pf *work)
2551{
2552 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2553 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2554}
2555
2556void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2557 struct kvm_async_pf *work)
2558{
2559 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2560 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2561}
2562
2563void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2564 struct kvm_async_pf *work)
2565{
2566 /* s390 will always inject the page directly */
2567}
2568
2569bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2570{
2571 /*
2572 * s390 will always inject the page directly,
2573	 * but we still want check_async_completion to clean up
2574 */
2575 return true;
2576}
2577
2578static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2579{
2580 hva_t hva;
2581 struct kvm_arch_async_pf arch;
2582 int rc;
2583
2584 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2585 return 0;
2586 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2587 vcpu->arch.pfault_compare)
2588 return 0;
2589 if (psw_extint_disabled(vcpu))
2590 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002591 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002592 return 0;
2593 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2594 return 0;
2595 if (!vcpu->arch.gmap->pfault_enabled)
2596 return 0;
2597
Heiko Carstens81480cc2014-01-01 16:36:07 +01002598 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2599 hva += current->thread.gmap_addr & ~PAGE_MASK;
2600 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002601 return 0;
2602
2603 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2604 return rc;
2605}
2606
Thomas Huth3fb4c402013-09-12 10:33:43 +02002607static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002608{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002609 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002610
Dominik Dingel3c038e62013-10-07 17:11:48 +02002611 /*
2612	 * On s390, notifications for arriving pages are delivered directly
2613	 * to the guest, but the housekeeping for completed pfaults is
2614 * handled outside the worker.
2615 */
2616 kvm_check_async_pf_completion(vcpu);
2617
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002618 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2619 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002620
2621 if (need_resched())
2622 schedule();
2623
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002624 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002625 s390_handle_mcck();
2626
Jens Freimann79395032014-04-17 10:10:30 +02002627 if (!kvm_is_ucontrol(vcpu->kvm)) {
2628 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2629 if (rc)
2630 return rc;
2631 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002632
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002633 rc = kvm_s390_handle_requests(vcpu);
2634 if (rc)
2635 return rc;
2636
David Hildenbrand27291e22014-01-23 12:26:52 +01002637 if (guestdbg_enabled(vcpu)) {
2638 kvm_s390_backup_guest_per_regs(vcpu);
2639 kvm_s390_patch_guest_per_regs(vcpu);
2640 }
2641
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002642 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002643 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2644 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2645 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002646
Thomas Huth3fb4c402013-09-12 10:33:43 +02002647 return 0;
2648}
2649
Thomas Huth492d8642015-02-10 16:11:01 +01002650static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2651{
David Hildenbrand56317922016-01-12 17:37:58 +01002652 struct kvm_s390_pgm_info pgm_info = {
2653 .code = PGM_ADDRESSING,
2654 };
2655 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002656 int rc;
2657
2658 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2659 trace_kvm_s390_sie_fault(vcpu);
2660
2661 /*
2662 * We want to inject an addressing exception, which is defined as a
2663 * suppressing or terminating exception. However, since we came here
2664 * by a DAT access exception, the PSW still points to the faulting
2665 * instruction since DAT exceptions are nullifying. So we've got
2666 * to look up the current opcode to get the length of the instruction
2667 * to be able to forward the PSW.
2668 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02002669 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002670 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002671 if (rc < 0) {
2672 return rc;
2673 } else if (rc) {
2674 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2675 * Forward by arbitrary ilc, injection will take care of
2676 * nullification if necessary.
2677 */
2678 pgm_info = vcpu->arch.pgm;
2679 ilen = 4;
2680 }
David Hildenbrand56317922016-01-12 17:37:58 +01002681 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2682 kvm_s390_forward_psw(vcpu, ilen);
2683 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002684}
2685
Thomas Huth3fb4c402013-09-12 10:33:43 +02002686static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2687{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002688 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2689 vcpu->arch.sie_block->icptcode);
2690 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2691
David Hildenbrand27291e22014-01-23 12:26:52 +01002692 if (guestdbg_enabled(vcpu))
2693 kvm_s390_restore_guest_per_regs(vcpu);
2694
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002695 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2696 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002697
2698 if (vcpu->arch.sie_block->icptcode > 0) {
2699 int rc = kvm_handle_sie_intercept(vcpu);
2700
2701 if (rc != -EOPNOTSUPP)
2702 return rc;
2703 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2704 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2705 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2706 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2707 return -EREMOTE;
2708 } else if (exit_reason != -EFAULT) {
2709 vcpu->stat.exit_null++;
2710 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002711 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2712 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2713 vcpu->run->s390_ucontrol.trans_exc_code =
2714 current->thread.gmap_addr;
2715 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002716 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002717 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002718 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002719 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002720 if (kvm_arch_setup_async_pf(vcpu))
2721 return 0;
2722 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002723 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002724 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002725}
2726
2727static int __vcpu_run(struct kvm_vcpu *vcpu)
2728{
2729 int rc, exit_reason;
2730
Thomas Huth800c1062013-09-12 10:33:45 +02002731 /*
2732	 * We try to hold kvm->srcu during most of vcpu_run (except when
2733	 * running the guest), so that memslots (and other stuff) are protected
2734 */
2735 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2736
Thomas Hutha76ccff2013-09-12 10:33:44 +02002737 do {
2738 rc = vcpu_pre_run(vcpu);
2739 if (rc)
2740 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002741
Thomas Huth800c1062013-09-12 10:33:45 +02002742 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002743 /*
2744	 * As PF_VCPU will be used in the fault handler, there should be
2745	 * no uaccess between guest_enter and guest_exit.
2746 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002747 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002748 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002749 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002750 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002751 exit_reason = sie64a(vcpu->arch.sie_block,
2752 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002753 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002754 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002755 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02002756 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002757 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002758
Thomas Hutha76ccff2013-09-12 10:33:44 +02002759 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002760 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002761
Thomas Huth800c1062013-09-12 10:33:45 +02002762 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002763 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002764}
2765
David Hildenbrandb028ee32014-07-17 10:47:43 +02002766static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2767{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002768 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002769 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002770
2771 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002772 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02002773 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2774 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2775 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2776 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2777 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2778 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002779 /* some control register changes require a tlb flush */
2780 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002781 }
2782 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002783 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002784 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2785 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2786 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2787 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2788 }
2789 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2790 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2791 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2792 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002793 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2794 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002795 }
Fan Zhang80cd8762016-08-15 04:53:22 +02002796 /*
2797 * If userspace sets the riccb (e.g. after migration) to a valid state,
2798 * we should enable RI here instead of doing the lazy enablement.
2799 */
2800 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002801 test_kvm_facility(vcpu->kvm, 64) &&
2802 riccb->valid &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002803 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01002804 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002805 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02002806 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002807 /*
2808 * If userspace sets the gscb (e.g. after migration) to non-zero,
2809 * we should enable GS here instead of doing the lazy enablement.
2810 */
2811 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
2812 test_kvm_facility(vcpu->kvm, 133) &&
2813 gscb->gssm &&
2814 !vcpu->arch.gs_enabled) {
2815 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
2816 vcpu->arch.sie_block->ecb |= ECB_GS;
2817 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
2818 vcpu->arch.gs_enabled = 1;
2819 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002820 save_access_regs(vcpu->arch.host_acrs);
2821 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002822 /* save host (userspace) fprs/vrs */
2823 save_fpu_regs();
2824 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
2825 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
2826 if (MACHINE_HAS_VX)
2827 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
2828 else
2829 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
2830 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
2831 if (test_fp_ctl(current->thread.fpu.fpc))
2832 /* User space provided an invalid FPC, let's clear it */
2833 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002834 if (MACHINE_HAS_GS) {
2835 preempt_disable();
2836 __ctl_set_bit(2, 4);
2837 if (current->thread.gs_cb) {
2838 vcpu->arch.host_gscb = current->thread.gs_cb;
2839 save_gs_cb(vcpu->arch.host_gscb);
2840 }
2841 if (vcpu->arch.gs_enabled) {
2842 current->thread.gs_cb = (struct gs_cb *)
2843 &vcpu->run->s.regs.gscb;
2844 restore_gs_cb(current->thread.gs_cb);
2845 }
2846 preempt_enable();
2847 }
Fan Zhang80cd8762016-08-15 04:53:22 +02002848
David Hildenbrandb028ee32014-07-17 10:47:43 +02002849 kvm_run->kvm_dirty_regs = 0;
2850}
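/*
 * Illustration only (hedged sketch, not part of this file): userspace hands
 * register updates to sync_regs() above by setting the matching KVM_SYNC_*
 * bit in kvm_run->kvm_dirty_regs before calling KVM_RUN. Assumes run points
 * at the mmap'ed kvm_run area of a vcpu.
 */
#if 0
static void set_guest_prefix(struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
	/* the next KVM_RUN picks this up and clears kvm_dirty_regs */
}
#endif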
2851
2852static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2853{
2854 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2855 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2856 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2857 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01002858 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002859 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2860 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2861 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2862 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2863 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2864 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2865 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002866 save_access_regs(vcpu->run->s.regs.acrs);
2867 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002868 /* Save guest register state */
2869 save_fpu_regs();
2870 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2871 /* Restore will be done lazily at return */
2872 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
2873 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002874 if (MACHINE_HAS_GS) {
2875 __ctl_set_bit(2, 4);
2876 if (vcpu->arch.gs_enabled)
2877 save_gs_cb(current->thread.gs_cb);
2878 preempt_disable();
2879 current->thread.gs_cb = vcpu->arch.host_gscb;
2880 restore_gs_cb(vcpu->arch.host_gscb);
2881 preempt_enable();
2882 if (!vcpu->arch.host_gscb)
2883 __ctl_clear_bit(2, 4);
2884 vcpu->arch.host_gscb = NULL;
2885 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002886
David Hildenbrandb028ee32014-07-17 10:47:43 +02002887}
2888
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002889int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2890{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002891 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002892 sigset_t sigsaved;
2893
Paolo Bonzini460df4c2017-02-08 11:50:15 +01002894 if (kvm_run->immediate_exit)
2895 return -EINTR;
2896
David Hildenbrand27291e22014-01-23 12:26:52 +01002897 if (guestdbg_exit_pending(vcpu)) {
2898 kvm_s390_prepare_debug_exit(vcpu);
2899 return 0;
2900 }
2901
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002902 if (vcpu->sigset_active)
2903 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2904
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002905 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2906 kvm_s390_vcpu_start(vcpu);
2907 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002908 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002909 vcpu->vcpu_id);
2910 return -EINVAL;
2911 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002912
David Hildenbrandb028ee32014-07-17 10:47:43 +02002913 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002914 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002915
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002916 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002917 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002918
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002919 if (signal_pending(current) && !rc) {
2920 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002921 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002922 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002923
David Hildenbrand27291e22014-01-23 12:26:52 +01002924 if (guestdbg_exit_pending(vcpu) && !rc) {
2925 kvm_s390_prepare_debug_exit(vcpu);
2926 rc = 0;
2927 }
2928
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002929 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002930 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002931 rc = 0;
2932 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002933
David Hildenbranddb0758b2016-02-15 09:42:25 +01002934 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002935 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002936
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002937 if (vcpu->sigset_active)
2938 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2939
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002940 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002941 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002942}
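/*
 * Illustration only (hedged sketch, not part of this file): the minimal
 * userspace counterpart of the run ioctl above. Assumes <errno.h>,
 * <sys/ioctl.h> and <linux/kvm.h>; vcpu_fd and run are a hypothetical open
 * vcpu fd and its mmap'ed kvm_run area.
 */
#if 0
static void run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			if (errno == EINTR)	/* interrupted by a signal */
				continue;
			break;			/* real error */
		}
		/* dispatch on run->exit_reason, e.g. KVM_EXIT_S390_SIEIC */
		if (run->exit_reason == KVM_EXIT_INTR)
			continue;
	}
}
#endif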
2943
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002944/*
2945 * store status at address
2946	 * we have two special cases:
2947 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2948 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2949 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002950int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002951{
Carsten Otte092670c2011-07-24 10:48:22 +02002952 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002953 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02002954 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01002955 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002956 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002957
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002958 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002959 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2960 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002961 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002962 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002963 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2964 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002965 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002966 gpa = px;
2967 } else
2968 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002969
2970 /* manually convert vector registers if necessary */
2971 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01002972 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002973 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2974 fprs, 128);
2975 } else {
2976 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002977 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002978 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002979 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002980 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002981 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002982 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002983 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02002984 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002985 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002986 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002987 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002988 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01002989 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002990 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01002991 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002992 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002993 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002994 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002995 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002996 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002997 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002998 &vcpu->arch.sie_block->gcr, 128);
2999 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003000}
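/*
 * Illustration only (hedged sketch, not part of this file): userspace
 * triggers the store above through the KVM_S390_STORE_STATUS ioctl, whose
 * argument is either an absolute address or one of the two special values
 * handled at the top of the function. Assumes an open vcpu fd.
 */
#if 0
static int store_status_at_prefix(int vcpu_fd)
{
	/* store the status into the prefix area of this vcpu */
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}
#endif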
3001
Thomas Huthe8798922013-11-06 15:46:33 +01003002int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3003{
3004 /*
3005 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003006 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003007	 * them into the save area
3008 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003009 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003010 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003011 save_access_regs(vcpu->run->s.regs.acrs);
3012
3013 return kvm_s390_store_status_unloaded(vcpu, addr);
3014}
3015
David Hildenbrand8ad35752014-03-14 11:00:21 +01003016static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3017{
3018 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003019 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003020}
3021
3022static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3023{
3024 unsigned int i;
3025 struct kvm_vcpu *vcpu;
3026
3027 kvm_for_each_vcpu(i, vcpu, kvm) {
3028 __disable_ibs_on_vcpu(vcpu);
3029 }
3030}
3031
3032static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3033{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003034 if (!sclp.has_ibs)
3035 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003036 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003037 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003038}
3039
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003040void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3041{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003042 int i, online_vcpus, started_vcpus = 0;
3043
3044 if (!is_vcpu_stopped(vcpu))
3045 return;
3046
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003047 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003048 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003049 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003050 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3051
3052 for (i = 0; i < online_vcpus; i++) {
3053 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3054 started_vcpus++;
3055 }
3056
3057 if (started_vcpus == 0) {
3058 /* we're the only active VCPU -> speed it up */
3059 __enable_ibs_on_vcpu(vcpu);
3060 } else if (started_vcpus == 1) {
3061 /*
3062 * As we are starting a second VCPU, we have to disable
3063 * the IBS facility on all VCPUs to remove potentially
3064	 * outstanding ENABLE requests.
3065 */
3066 __disable_ibs_on_all_vcpus(vcpu->kvm);
3067 }
3068
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003069 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003070 /*
3071 * Another VCPU might have used IBS while we were offline.
3072 * Let's play safe and flush the VCPU at startup.
3073 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003074 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003075 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003076 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003077}
3078
3079void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3080{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003081 int i, online_vcpus, started_vcpus = 0;
3082 struct kvm_vcpu *started_vcpu = NULL;
3083
3084 if (is_vcpu_stopped(vcpu))
3085 return;
3086
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003087 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003088 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003089 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003090 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3091
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003092	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003093 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003094
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003095 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003096 __disable_ibs_on_vcpu(vcpu);
3097
3098 for (i = 0; i < online_vcpus; i++) {
3099 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3100 started_vcpus++;
3101 started_vcpu = vcpu->kvm->vcpus[i];
3102 }
3103 }
3104
3105 if (started_vcpus == 1) {
3106 /*
3107 * As we only have one VCPU left, we want to enable the
3108 * IBS facility for that VCPU to speed it up.
3109 */
3110 __enable_ibs_on_vcpu(started_vcpu);
3111 }
3112
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003113 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003114 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003115}
3116
Cornelia Huckd6712df2012-12-20 15:32:11 +01003117static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3118 struct kvm_enable_cap *cap)
3119{
3120 int r;
3121
3122 if (cap->flags)
3123 return -EINVAL;
3124
3125 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003126 case KVM_CAP_S390_CSS_SUPPORT:
3127 if (!vcpu->kvm->arch.css_support) {
3128 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003129 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003130 trace_kvm_s390_enable_css(vcpu->kvm);
3131 }
3132 r = 0;
3133 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003134 default:
3135 r = -EINVAL;
3136 break;
3137 }
3138 return r;
3139}
3140
Thomas Huth41408c282015-02-06 15:01:21 +01003141static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3142 struct kvm_s390_mem_op *mop)
3143{
3144 void __user *uaddr = (void __user *)mop->buf;
3145 void *tmpbuf = NULL;
3146 int r, srcu_idx;
3147 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3148 | KVM_S390_MEMOP_F_CHECK_ONLY;
3149
3150 if (mop->flags & ~supported_flags)
3151 return -EINVAL;
3152
3153 if (mop->size > MEM_OP_MAX_SIZE)
3154 return -E2BIG;
3155
3156 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3157 tmpbuf = vmalloc(mop->size);
3158 if (!tmpbuf)
3159 return -ENOMEM;
3160 }
3161
3162 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3163
3164 switch (mop->op) {
3165 case KVM_S390_MEMOP_LOGICAL_READ:
3166 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003167 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3168 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003169 break;
3170 }
3171 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3172 if (r == 0) {
3173 if (copy_to_user(uaddr, tmpbuf, mop->size))
3174 r = -EFAULT;
3175 }
3176 break;
3177 case KVM_S390_MEMOP_LOGICAL_WRITE:
3178 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003179 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3180 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003181 break;
3182 }
3183 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3184 r = -EFAULT;
3185 break;
3186 }
3187 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3188 break;
3189 default:
3190 r = -EINVAL;
3191 }
3192
3193 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3194
3195 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3196 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3197
3198 vfree(tmpbuf);
3199 return r;
3200}
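/*
 * Illustration only (hedged sketch, not part of this file): reading guest
 * memory through KVM_S390_MEM_OP, which lands in the handler above. Assumes
 * an open vcpu fd; a return value > 0 indicates a guest access exception
 * (the program interruption code), matching the injection logic above.
 */
#if 0
static int read_guest_mem(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,			/* logical guest address */
		.size  = len,			/* capped by MEM_OP_MAX_SIZE */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,			/* access register number */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
#endif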
3201
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003202long kvm_arch_vcpu_ioctl(struct file *filp,
3203 unsigned int ioctl, unsigned long arg)
3204{
3205 struct kvm_vcpu *vcpu = filp->private_data;
3206 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003207 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003208 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003209
Avi Kivity937366242010-05-13 12:35:17 +03003210 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003211 case KVM_S390_IRQ: {
3212 struct kvm_s390_irq s390irq;
3213
3214 r = -EFAULT;
3215 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3216 break;
3217 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3218 break;
3219 }
Avi Kivity937366242010-05-13 12:35:17 +03003220 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003221 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003222 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003223
Avi Kivity937366242010-05-13 12:35:17 +03003224 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003225 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity937366242010-05-13 12:35:17 +03003226 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003227 if (s390int_to_s390irq(&s390int, &s390irq))
3228 return -EINVAL;
3229 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity937366242010-05-13 12:35:17 +03003230 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003231 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003232 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003233 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003234 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003235 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003236 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003237 case KVM_S390_SET_INITIAL_PSW: {
3238 psw_t psw;
3239
Avi Kivitybc923cc2010-05-13 12:21:46 +03003240 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003241 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003242 break;
3243 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3244 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003245 }
3246 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003247 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3248 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003249 case KVM_SET_ONE_REG:
3250 case KVM_GET_ONE_REG: {
3251 struct kvm_one_reg reg;
3252 r = -EFAULT;
3253 if (copy_from_user(&reg, argp, sizeof(reg)))
3254 break;
3255 if (ioctl == KVM_SET_ONE_REG)
3256 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3257 else
3258 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3259 break;
3260 }
Carsten Otte27e03932012-01-04 10:25:21 +01003261#ifdef CONFIG_KVM_S390_UCONTROL
3262 case KVM_S390_UCAS_MAP: {
3263 struct kvm_s390_ucas_mapping ucasmap;
3264
3265 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3266 r = -EFAULT;
3267 break;
3268 }
3269
3270 if (!kvm_is_ucontrol(vcpu->kvm)) {
3271 r = -EINVAL;
3272 break;
3273 }
3274
3275 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3276 ucasmap.vcpu_addr, ucasmap.length);
3277 break;
3278 }
3279 case KVM_S390_UCAS_UNMAP: {
3280 struct kvm_s390_ucas_mapping ucasmap;
3281
3282 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3283 r = -EFAULT;
3284 break;
3285 }
3286
3287 if (!kvm_is_ucontrol(vcpu->kvm)) {
3288 r = -EINVAL;
3289 break;
3290 }
3291
3292 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3293 ucasmap.length);
3294 break;
3295 }
3296#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003297 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003298 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003299 break;
3300 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003301 case KVM_ENABLE_CAP:
3302 {
3303 struct kvm_enable_cap cap;
3304 r = -EFAULT;
3305 if (copy_from_user(&cap, argp, sizeof(cap)))
3306 break;
3307 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3308 break;
3309 }
Thomas Huth41408c282015-02-06 15:01:21 +01003310 case KVM_S390_MEM_OP: {
3311 struct kvm_s390_mem_op mem_op;
3312
3313 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3314 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3315 else
3316 r = -EFAULT;
3317 break;
3318 }
Jens Freimann816c7662014-11-24 17:13:46 +01003319 case KVM_S390_SET_IRQ_STATE: {
3320 struct kvm_s390_irq_state irq_state;
3321
3322 r = -EFAULT;
3323 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3324 break;
3325 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3326 irq_state.len == 0 ||
3327 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3328 r = -EINVAL;
3329 break;
3330 }
3331 r = kvm_s390_set_irq_state(vcpu,
3332 (void __user *) irq_state.buf,
3333 irq_state.len);
3334 break;
3335 }
3336 case KVM_S390_GET_IRQ_STATE: {
3337 struct kvm_s390_irq_state irq_state;
3338
3339 r = -EFAULT;
3340 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3341 break;
3342 if (irq_state.len == 0) {
3343 r = -EINVAL;
3344 break;
3345 }
3346 r = kvm_s390_get_irq_state(vcpu,
3347 (__u8 __user *) irq_state.buf,
3348 irq_state.len);
3349 break;
3350 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003351 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003352 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003353 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003354 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003355}
3356
Carsten Otte5b1c1492012-01-04 10:25:23 +01003357int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3358{
3359#ifdef CONFIG_KVM_S390_UCONTROL
3360 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3361 && (kvm_is_ucontrol(vcpu->kvm))) {
3362 vmf->page = virt_to_page(vcpu->arch.sie_block);
3363 get_page(vmf->page);
3364 return 0;
3365 }
3366#endif
3367 return VM_FAULT_SIGBUS;
3368}
3369
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303370int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3371 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003372{
3373 return 0;
3374}
3375
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003376/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003377int kvm_arch_prepare_memory_region(struct kvm *kvm,
3378 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003379 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003380 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003381{
Nick Wangdd2887e2013-03-25 17:22:57 +01003382	/* A few sanity checks. Memory slots have to start and end at a
3383	   segment boundary (1MB). The memory in userland may be fragmented
3384	   into various different vmas. It is okay to mmap() and munmap()
3385	   ranges in this slot at any time after doing this call */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003386
Carsten Otte598841c2011-07-24 10:48:21 +02003387 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003388 return -EINVAL;
3389
Carsten Otte598841c2011-07-24 10:48:21 +02003390 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003391 return -EINVAL;
3392
Dominik Dingela3a92c32014-12-01 17:24:42 +01003393 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3394 return -EINVAL;
3395
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003396 return 0;
3397}
3398
3399void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003400 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003401 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003402 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003403 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003404{
Carsten Ottef7850c92011-07-24 10:48:23 +02003405 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003406
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003407 /* If the basics of the memslot do not change, we do not want
3408 * to update the gmap. Every update causes several unnecessary
3409 * segment translation exceptions. This is usually handled just
3410 * fine by the normal fault handler + gmap, but it will also
3411 * cause faults on the prefix page of running guest CPUs.
3412 */
3413 if (old->userspace_addr == mem->userspace_addr &&
3414 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3415 old->npages * PAGE_SIZE == mem->memory_size)
3416 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003417
3418 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3419 mem->guest_phys_addr, mem->memory_size);
3420 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003421 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003422 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003423}
3424
Alexander Yarygin60a37702016-04-01 15:38:57 +03003425static inline unsigned long nonhyp_mask(int i)
3426{
3427 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3428
3429 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3430}
3431
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003432void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3433{
3434 vcpu->valid_wakeup = false;
3435}
3436
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003437static int __init kvm_s390_init(void)
3438{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003439 int i;
3440
David Hildenbrand07197fd2015-01-30 16:01:38 +01003441 if (!sclp.has_sief2) {
3442 pr_info("SIE not available\n");
3443 return -ENODEV;
3444 }
3445
Alexander Yarygin60a37702016-04-01 15:38:57 +03003446 for (i = 0; i < 16; i++)
3447 kvm_s390_fac_list_mask[i] |=
3448 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3449
Michael Mueller9d8d5782015-02-02 15:42:51 +01003450 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003451}
3452
3453static void __exit kvm_s390_exit(void)
3454{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003455 kvm_exit();
3456}
3457
3458module_init(kvm_s390_init);
3459module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003460
3461/*
3462 * Enable autoloading of the kvm module.
3463 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3464 * since x86 takes a different approach.
3465 */
3466#include <linux/miscdevice.h>
3467MODULE_ALIAS_MISCDEV(KVM_MINOR);
3468MODULE_ALIAS("devname:kvm");