/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

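/*
 * Test whether PERFORM LOCKED OPERATION (PLO) provides the given
 * function code. Setting bit 0x100 in r0 selects the "test bit" mode,
 * so the parameter registers are ignored and only availability is
 * reported; condition code 0 means the function is implemented.
 */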
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

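/*
 * Probe the host for everything KVM may offer to its guests: the PLO,
 * PTFF and CPACF subfunctions are collected in
 * kvm_s390_available_subfunc, the SIE features (as reported by SCLP
 * and the facility bits) in kvm_s390_available_cpu_feat. Features
 * needed for the virtual SIE are only advertised if the "nested"
 * module parameter is set.
 */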
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

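/* Report which capabilities and limits this host/KVM build supports. */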
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

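/*
 * Transfer the dirty state of each page of a memory slot from the host
 * page tables into the KVM dirty bitmap. Slots can be large, so bail
 * out on a fatal signal and give other tasks a chance to run between
 * pages.
 */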
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

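/* Make every vCPU of the VM intercept the operation exception. */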
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

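/*
 * Enable a VM-wide capability. Capabilities that extend the guest
 * facility lists (vector registers, runtime instrumentation) can only
 * be enabled as long as no vCPU has been created.
 */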
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

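/*
 * The KVM_S390_VM_MEM_CTRL attribute group: query the memory limit,
 * enable/reset CMMA and change the guest memory limit. Enabling CMMA
 * and changing the limit are only possible before vCPUs are created.
 */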
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

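/*
 * Set a crypto attribute: generate new AES/DEA wrapping key masks or
 * clear them. Afterwards, every vCPU is kicked out of SIE so that it
 * picks up the new crypto control block contents.
 */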
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

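/*
 * The guest TOD clock is exposed as two attributes: the epoch
 * extension (currently required to be zero) and the 64-bit TOD base.
 */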
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

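/*
 * Set the guest CPU model (cpuid, IBC and facility list). The
 * requested IBC is clamped to the range the host supports; like all
 * CPU model changes, this is only possible before the first vCPU has
 * been created.
 */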
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

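/* Report the currently configured guest CPU model to user space. */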
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

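/*
 * Copy the guest's storage keys into a user space buffer. If the guest
 * has never used storage keys, KVM_S390_GET_SKEYS_NONE tells user
 * space that there is nothing to save.
 */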
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

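/*
 * kvm_s390_query_ap_config() issues PQAP with the QCI function code to
 * retrieve the 128-byte AP configuration block; the EX_TABLE entry lets
 * the probe fail with a nonzero cc instead of oopsing on machines that
 * lack the AP instructions (a reading of the asm below, not a statement
 * from the architecture documentation).
 */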
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

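/*
 * The initial guest CPUID is the host's with the version byte forced to
 * 0xff; a version code of 0xff is what STIDP reports when running under
 * a hypervisor, so guests can recognize they are virtualized (stated
 * here as background, not taken from this file).
 */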
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	/*
	 * Place successive SCAs at staggered 16-byte offsets within the
	 * page, wrapping around before the block would cross a page
	 * boundary.
	 */
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

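/*
 * Grow a VM from the basic to the extended SCA: all VCPUs are blocked
 * and kicked out of SIE, the entries are copied over under the write
 * lock, and every SIE block is repointed to the new block before any
 * VCPU can reenter the guest.
 */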
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

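/*
 * CPU timer accounting: while the VCPU runs in SIE the hardware keeps
 * the guest CPU timer in the SIE block up to date; outside of SIE the
 * elapsed host time since cputm_start has to be subtracted on reads.
 * The seqcount lets other threads read a consistent value without
 * locking while the owning thread starts/stops accounting.
 */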
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
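
/*
 * The two helpers above also back the KVM_REG_S390_CPU_TIMER one-reg
 * accessors in kvm_arch_vcpu_ioctl_get_one_reg()/_set_one_reg() below.
 */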

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

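/*
 * cbrlo points to a zeroed page the hardware uses as the buffer for the
 * guest's CMMA (ESSA) page-state hints; the ecb2 bit manipulation below
 * enables the assist.  The bit semantics are paraphrased from how this
 * file uses the SIE block, not quoted from architecture documentation.
 */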
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= 0x20;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
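
/*
 * The prefix area (two pages starting at the VCPU's prefix) has to stay
 * accessible while the VCPU is in SIE; when the notifier above sees it
 * invalidated, KVM_REQ_MMU_RELOAD re-arms the protection in
 * kvm_s390_handle_requests() before the VCPU reenters the guest.
 */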
2124
Christoffer Dallb6d33832012-03-08 16:44:24 -05002125int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2126{
2127 /* kvm common code refers to this, but never calls it */
2128 BUG();
2129 return 0;
2130}
2131
Carsten Otte14eebd92012-05-15 14:15:26 +02002132static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2133 struct kvm_one_reg *reg)
2134{
2135 int r = -EINVAL;
2136
2137 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002138 case KVM_REG_S390_TODPR:
2139 r = put_user(vcpu->arch.sie_block->todpr,
2140 (u32 __user *)reg->addr);
2141 break;
2142 case KVM_REG_S390_EPOCHDIFF:
2143 r = put_user(vcpu->arch.sie_block->epoch,
2144 (u64 __user *)reg->addr);
2145 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002146 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002147 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002148 (u64 __user *)reg->addr);
2149 break;
2150 case KVM_REG_S390_CLOCK_COMP:
2151 r = put_user(vcpu->arch.sie_block->ckc,
2152 (u64 __user *)reg->addr);
2153 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002154 case KVM_REG_S390_PFTOKEN:
2155 r = put_user(vcpu->arch.pfault_token,
2156 (u64 __user *)reg->addr);
2157 break;
2158 case KVM_REG_S390_PFCOMPARE:
2159 r = put_user(vcpu->arch.pfault_compare,
2160 (u64 __user *)reg->addr);
2161 break;
2162 case KVM_REG_S390_PFSELECT:
2163 r = put_user(vcpu->arch.pfault_select,
2164 (u64 __user *)reg->addr);
2165 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002166 case KVM_REG_S390_PP:
2167 r = put_user(vcpu->arch.sie_block->pp,
2168 (u64 __user *)reg->addr);
2169 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002170 case KVM_REG_S390_GBEA:
2171 r = put_user(vcpu->arch.sie_block->gbea,
2172 (u64 __user *)reg->addr);
2173 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002174 default:
2175 break;
2176 }
2177
2178 return r;
2179}
2180
2181static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2182 struct kvm_one_reg *reg)
2183{
2184 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002185 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002186
2187 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002188 case KVM_REG_S390_TODPR:
2189 r = get_user(vcpu->arch.sie_block->todpr,
2190 (u32 __user *)reg->addr);
2191 break;
2192 case KVM_REG_S390_EPOCHDIFF:
2193 r = get_user(vcpu->arch.sie_block->epoch,
2194 (u64 __user *)reg->addr);
2195 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002196 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002197 r = get_user(val, (u64 __user *)reg->addr);
2198 if (!r)
2199 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002200 break;
2201 case KVM_REG_S390_CLOCK_COMP:
2202 r = get_user(vcpu->arch.sie_block->ckc,
2203 (u64 __user *)reg->addr);
2204 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002205 case KVM_REG_S390_PFTOKEN:
2206 r = get_user(vcpu->arch.pfault_token,
2207 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002208 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2209 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002210 break;
2211 case KVM_REG_S390_PFCOMPARE:
2212 r = get_user(vcpu->arch.pfault_compare,
2213 (u64 __user *)reg->addr);
2214 break;
2215 case KVM_REG_S390_PFSELECT:
2216 r = get_user(vcpu->arch.pfault_select,
2217 (u64 __user *)reg->addr);
2218 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002219 case KVM_REG_S390_PP:
2220 r = get_user(vcpu->arch.sie_block->pp,
2221 (u64 __user *)reg->addr);
2222 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002223 case KVM_REG_S390_GBEA:
2224 r = get_user(vcpu->arch.sie_block->gbea,
2225 (u64 __user *)reg->addr);
2226 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002227 default:
2228 break;
2229 }
2230
2231 return r;
2232}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002233
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002234static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2235{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002236 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002237 return 0;
2238}
2239
2240int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2241{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002242 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002243 return 0;
2244}
2245
2246int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2247{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002248 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002249 return 0;
2250}
2251
2252int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2253 struct kvm_sregs *sregs)
2254{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002255 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002256 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002257 return 0;
2258}
2259
2260int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2261 struct kvm_sregs *sregs)
2262{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002263 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002264 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002265 return 0;
2266}
2267
2268int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2269{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002270 if (test_fp_ctl(fpu->fpc))
2271 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002272 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002273 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002274 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2275 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002276 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002277 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002278 return 0;
2279}
2280
2281int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2282{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002283 /* make sure we have the latest values */
2284 save_fpu_regs();
2285 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002286 convert_vx_to_fp((freg_t *) fpu->fprs,
2287 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002288 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002289 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002290 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002291 return 0;
2292}
2293
2294static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2295{
2296 int rc = 0;
2297
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002298 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002299 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002300 else {
2301 vcpu->run->psw_mask = psw.mask;
2302 vcpu->run->psw_addr = psw.addr;
2303 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002304 return rc;
2305}
2306
2307int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2308 struct kvm_translation *tr)
2309{
2310 return -EINVAL; /* not implemented yet */
2311}
2312
David Hildenbrand27291e22014-01-23 12:26:52 +01002313#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2314 KVM_GUESTDBG_USE_HW_BP | \
2315 KVM_GUESTDBG_ENABLE)
2316
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002317int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2318 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002319{
David Hildenbrand27291e22014-01-23 12:26:52 +01002320 int rc = 0;
2321
2322 vcpu->guest_debug = 0;
2323 kvm_s390_clear_bp_data(vcpu);
2324
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002325 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002326 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002327 if (!sclp.has_gpere)
2328 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002329
2330 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2331 vcpu->guest_debug = dbg->control;
2332 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002333 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002334
2335 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2336 rc = kvm_s390_import_bp_data(vcpu, dbg);
2337 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002338 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002339 vcpu->arch.guestdbg.last_bp = 0;
2340 }
2341
2342 if (rc) {
2343 vcpu->guest_debug = 0;
2344 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002345 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002346 }
2347
2348 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002349}
2350
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002351int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2352 struct kvm_mp_state *mp_state)
2353{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002354 /* CHECK_STOP and LOAD are not supported yet */
2355 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2356 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002357}
2358
2359int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2360 struct kvm_mp_state *mp_state)
2361{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002362 int rc = 0;
2363
2364 /* user space knows about this interface - let it control the state */
2365 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2366
2367 switch (mp_state->mp_state) {
2368 case KVM_MP_STATE_STOPPED:
2369 kvm_s390_vcpu_stop(vcpu);
2370 break;
2371 case KVM_MP_STATE_OPERATING:
2372 kvm_s390_vcpu_start(vcpu);
2373 break;
2374 case KVM_MP_STATE_LOAD:
2375 case KVM_MP_STATE_CHECK_STOP:
2376 /* fall through - CHECK_STOP and LOAD are not supported yet */
2377 default:
2378 rc = -ENXIO;
2379 }
2380
2381 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002382}
2383
David Hildenbrand8ad35752014-03-14 11:00:21 +01002384static bool ibs_enabled(struct kvm_vcpu *vcpu)
2385{
2386 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2387}
2388
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002389static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2390{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002391retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002392 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002393 if (!vcpu->requests)
2394 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002395 /*
2396 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002397 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002398 * This ensures that the ipte instruction for this request has
2399 * already finished. We might race against a second unmapper that
2400 * wants to set the blocking bit. Lets just retry the request loop.
2401 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002402 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002403 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002404 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2405 kvm_s390_get_prefix(vcpu),
2406 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002407 if (rc) {
2408 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002409 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002410 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002411 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002412 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002413
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002414 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2415 vcpu->arch.sie_block->ihcpu = 0xffff;
2416 goto retry;
2417 }
2418
David Hildenbrand8ad35752014-03-14 11:00:21 +01002419 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2420 if (!ibs_enabled(vcpu)) {
2421 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002422 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002423 &vcpu->arch.sie_block->cpuflags);
2424 }
2425 goto retry;
2426 }
2427
2428 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2429 if (ibs_enabled(vcpu)) {
2430 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002431 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002432 &vcpu->arch.sie_block->cpuflags);
2433 }
2434 goto retry;
2435 }
2436
David Hildenbrand6502a342016-06-21 14:19:51 +02002437 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2438 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2439 goto retry;
2440 }
2441
David Hildenbrand0759d062014-05-13 16:54:32 +02002442 /* nothing to do, just clear the request */
2443 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2444
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002445 return 0;
2446}
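
/*
 * Editor's note (not part of the original source): the producer side of
 * the request bits consumed above is kvm_s390_sync_request(), used later
 * in this file; it sets the bit and kicks the vcpu out of SIE so the
 * handler runs before the next guest entry, e.g.:
 *
 *	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 */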
2447
David Hildenbrand25ed1672015-05-12 09:49:14 +02002448void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2449{
2450 struct kvm_vcpu *vcpu;
2451 int i;
2452
2453 mutex_lock(&kvm->lock);
2454 preempt_disable();
2455 kvm->arch.epoch = tod - get_tod_clock();
2456 kvm_s390_vcpu_block_all(kvm);
2457 kvm_for_each_vcpu(i, vcpu, kvm)
2458 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2459 kvm_s390_vcpu_unblock_all(kvm);
2460 preempt_enable();
2461 mutex_unlock(&kvm->lock);
2462}
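
/*
 * Editor's note (not part of the original source): the epoch stored above
 * is the delta SIE adds to the host TOD clock, i.e.
 *
 *	guest_tod = get_tod_clock() + kvm->arch.epoch
 *
 * so requesting guest time "tod" means epoch = tod - get_tod_clock().
 * Blocking all vcpus keeps them from running with mixed epoch values.
 */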
2463
Thomas Huthfa576c52014-05-06 17:20:16 +02002464/**
2465 * kvm_arch_fault_in_page - fault-in guest page if necessary
2466 * @vcpu: The corresponding virtual cpu
2467 * @gpa: Guest physical address
2468 * @writable: Whether the page should be writable or not
2469 *
2470 * Make sure that a guest page has been faulted-in on the host.
2471 *
2472 * Return: Zero on success, negative error code otherwise.
2473 */
2474long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002475{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002476 return gmap_fault(vcpu->arch.gmap, gpa,
2477 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002478}
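
/*
 * Editor's note (not part of the original source): callers pass
 * writable = 1 to resolve write faults; see vcpu_post_run() below, which
 * calls kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1) when
 * async page faults cannot be used.
 */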
2479
Dominik Dingel3c038e62013-10-07 17:11:48 +02002480static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2481 unsigned long token)
2482{
2483 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002484 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002485
2486 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002487 irq.u.ext.ext_params2 = token;
2488 irq.type = KVM_S390_INT_PFAULT_INIT;
2489 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002490 } else {
2491 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002492 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002493 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2494 }
2495}
2496
2497void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2498 struct kvm_async_pf *work)
2499{
2500 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2501 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2502}
2503
2504void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2505 struct kvm_async_pf *work)
2506{
2507 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2508 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2509}
2510
2511void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2512 struct kvm_async_pf *work)
2513{
2514 /* s390 will always inject the page directly */
2515}
2516
2517bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2518{
2519 /*
2520 * s390 will always inject the page directly,
 2521	 * but we still want check_async_completion to clean up
2522 */
2523 return true;
2524}
2525
2526static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2527{
2528 hva_t hva;
2529 struct kvm_arch_async_pf arch;
2530 int rc;
2531
2532 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2533 return 0;
2534 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2535 vcpu->arch.pfault_compare)
2536 return 0;
2537 if (psw_extint_disabled(vcpu))
2538 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002539 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002540 return 0;
2541 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2542 return 0;
2543 if (!vcpu->arch.gmap->pfault_enabled)
2544 return 0;
2545
Heiko Carstens81480cc2014-01-01 16:36:07 +01002546 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2547 hva += current->thread.gmap_addr & ~PAGE_MASK;
2548 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002549 return 0;
2550
2551 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2552 return rc;
2553}
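
/*
 * Editor's summary (derived from the checks above, not original text):
 * a pfault token is only armed when userspace configured a valid token,
 * the PSW matches the pfault select/compare mask, external interrupts
 * are enabled including the subclass gated by the 0x200 bit in CR0, no
 * other interrupt is pending, and pfault was enabled on the gmap.
 */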
2554
Thomas Huth3fb4c402013-09-12 10:33:43 +02002555static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002556{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002557 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002558
Dominik Dingel3c038e62013-10-07 17:11:48 +02002559 /*
2560 * On s390 notifications for arriving pages will be delivered directly
 2561	 * to the guest but the housekeeping for completed pfaults is
2562 * handled outside the worker.
2563 */
2564 kvm_check_async_pf_completion(vcpu);
2565
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002566 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2567 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002568
2569 if (need_resched())
2570 schedule();
2571
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002572 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002573 s390_handle_mcck();
2574
Jens Freimann79395032014-04-17 10:10:30 +02002575 if (!kvm_is_ucontrol(vcpu->kvm)) {
2576 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2577 if (rc)
2578 return rc;
2579 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002580
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002581 rc = kvm_s390_handle_requests(vcpu);
2582 if (rc)
2583 return rc;
2584
David Hildenbrand27291e22014-01-23 12:26:52 +01002585 if (guestdbg_enabled(vcpu)) {
2586 kvm_s390_backup_guest_per_regs(vcpu);
2587 kvm_s390_patch_guest_per_regs(vcpu);
2588 }
2589
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002590 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002591 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2592 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2593 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002594
Thomas Huth3fb4c402013-09-12 10:33:43 +02002595 return 0;
2596}
2597
Thomas Huth492d8642015-02-10 16:11:01 +01002598static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2599{
David Hildenbrand56317922016-01-12 17:37:58 +01002600 struct kvm_s390_pgm_info pgm_info = {
2601 .code = PGM_ADDRESSING,
2602 };
2603 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002604 int rc;
2605
2606 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2607 trace_kvm_s390_sie_fault(vcpu);
2608
2609 /*
2610 * We want to inject an addressing exception, which is defined as a
2611 * suppressing or terminating exception. However, since we came here
2612 * by a DAT access exception, the PSW still points to the faulting
2613 * instruction since DAT exceptions are nullifying. So we've got
2614 * to look up the current opcode to get the length of the instruction
2615 * to be able to forward the PSW.
2616 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02002617 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002618 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002619 if (rc < 0) {
2620 return rc;
2621 } else if (rc) {
2622 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2623 * Forward by arbitrary ilc, injection will take care of
2624 * nullification if necessary.
2625 */
2626 pgm_info = vcpu->arch.pgm;
2627 ilen = 4;
2628 }
David Hildenbrand56317922016-01-12 17:37:58 +01002629 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2630 kvm_s390_forward_psw(vcpu, ilen);
2631 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002632}
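
/*
 * Editor's note (not part of the original source): insn_length() derives
 * the length from bits 0-1 of the opcode, as architected: 00 -> 2 bytes,
 * 01/10 -> 4 bytes, 11 -> 6 bytes. E.g. opcode byte 0xb2 (top bits 10)
 * gives ilen = 4, so the PSW is forwarded by 4.
 */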
2633
Thomas Huth3fb4c402013-09-12 10:33:43 +02002634static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2635{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002636 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2637 vcpu->arch.sie_block->icptcode);
2638 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2639
David Hildenbrand27291e22014-01-23 12:26:52 +01002640 if (guestdbg_enabled(vcpu))
2641 kvm_s390_restore_guest_per_regs(vcpu);
2642
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002643 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2644 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002645
2646 if (vcpu->arch.sie_block->icptcode > 0) {
2647 int rc = kvm_handle_sie_intercept(vcpu);
2648
2649 if (rc != -EOPNOTSUPP)
2650 return rc;
2651 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2652 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2653 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2654 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2655 return -EREMOTE;
2656 } else if (exit_reason != -EFAULT) {
2657 vcpu->stat.exit_null++;
2658 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002659 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2660 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2661 vcpu->run->s390_ucontrol.trans_exc_code =
2662 current->thread.gmap_addr;
2663 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002664 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002665 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002666 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002667 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002668 if (kvm_arch_setup_async_pf(vcpu))
2669 return 0;
2670 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002671 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002672 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002673}
2674
2675static int __vcpu_run(struct kvm_vcpu *vcpu)
2676{
2677 int rc, exit_reason;
2678
Thomas Huth800c1062013-09-12 10:33:45 +02002679 /*
2680 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2681 * ning the guest), so that memslots (and other stuff) are protected
2682 */
2683 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2684
Thomas Hutha76ccff2013-09-12 10:33:44 +02002685 do {
2686 rc = vcpu_pre_run(vcpu);
2687 if (rc)
2688 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002689
Thomas Huth800c1062013-09-12 10:33:45 +02002690 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002691 /*
2692 * As PF_VCPU will be used in fault handler, between
2693 * guest_enter and guest_exit should be no uaccess.
2694 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002695 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002696 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002697 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002698 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002699 exit_reason = sie64a(vcpu->arch.sie_block,
2700 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002701 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002702 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02002703 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02002704 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002705 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002706
Thomas Hutha76ccff2013-09-12 10:33:44 +02002707 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002708 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002709
Thomas Huth800c1062013-09-12 10:33:45 +02002710 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002711 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002712}
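
/*
 * Editor's note (not part of the original source): the irq-off window
 * around sie64a() pairs guest_enter_irqoff()/guest_exit_irqoff() with
 * the cpu-timer accounting toggles, presumably because the CPU timer is
 * stepped on behalf of the guest while inside SIE, so host-side software
 * accounting has to pause for that interval.
 */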
2713
David Hildenbrandb028ee32014-07-17 10:47:43 +02002714static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2715{
2716 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2717 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2718 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2719 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2720 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2721 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002722 /* some control register changes require a tlb flush */
2723 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002724 }
2725 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002726 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002727 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2728 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2729 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2730 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2731 }
2732 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2733 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2734 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2735 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002736 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2737 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002738 }
Fan Zhang80cd8762016-08-15 04:53:22 +02002739 /*
2740 * If userspace sets the riccb (e.g. after migration) to a valid state,
2741 * we should enable RI here instead of doing the lazy enablement.
2742 */
2743 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
2744 test_kvm_facility(vcpu->kvm, 64)) {
2745 struct runtime_instr_cb *riccb =
2746 (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
2747
2748 if (riccb->valid)
2749 vcpu->arch.sie_block->ecb3 |= 0x01;
2750 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002751 save_access_regs(vcpu->arch.host_acrs);
2752 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002753 /* save host (userspace) fprs/vrs */
2754 save_fpu_regs();
2755 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
2756 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
2757 if (MACHINE_HAS_VX)
2758 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
2759 else
2760 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
2761 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
2762 if (test_fp_ctl(current->thread.fpu.fpc))
2763 /* User space provided an invalid FPC, let's clear it */
2764 current->thread.fpu.fpc = 0;
Fan Zhang80cd8762016-08-15 04:53:22 +02002765
David Hildenbrandb028ee32014-07-17 10:47:43 +02002766 kvm_run->kvm_dirty_regs = 0;
2767}
2768
2769static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2770{
2771 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2772 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2773 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2774 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01002775 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002776 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2777 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2778 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2779 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2780 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2781 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2782 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002783 save_access_regs(vcpu->run->s.regs.acrs);
2784 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002785 /* Save guest register state */
2786 save_fpu_regs();
2787 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
2788 /* Restore will be done lazily at return */
2789 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
2790 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
2791
David Hildenbrandb028ee32014-07-17 10:47:43 +02002792}
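
/*
 * Editor's note (not part of the original source): store_regs() mirrors
 * sync_regs() above: guest register state flows back into kvm_run, and
 * the host access/fpu registers saved during sync_regs() are restored,
 * so the KVM_RUN caller always sees a consistent host state.
 */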
2793
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002794int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2795{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002796 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002797 sigset_t sigsaved;
2798
Paolo Bonzini460df4c2017-02-08 11:50:15 +01002799 if (kvm_run->immediate_exit)
2800 return -EINTR;
2801
David Hildenbrand27291e22014-01-23 12:26:52 +01002802 if (guestdbg_exit_pending(vcpu)) {
2803 kvm_s390_prepare_debug_exit(vcpu);
2804 return 0;
2805 }
2806
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002807 if (vcpu->sigset_active)
2808 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2809
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002810 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2811 kvm_s390_vcpu_start(vcpu);
2812 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002813 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002814 vcpu->vcpu_id);
2815 return -EINVAL;
2816 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002817
David Hildenbrandb028ee32014-07-17 10:47:43 +02002818 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002819 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002820
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002821 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002822 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002823
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002824 if (signal_pending(current) && !rc) {
2825 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002826 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002827 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002828
David Hildenbrand27291e22014-01-23 12:26:52 +01002829 if (guestdbg_exit_pending(vcpu) && !rc) {
2830 kvm_s390_prepare_debug_exit(vcpu);
2831 rc = 0;
2832 }
2833
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002834 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002835 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002836 rc = 0;
2837 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002838
David Hildenbranddb0758b2016-02-15 09:42:25 +01002839 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002840 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002841
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002842 if (vcpu->sigset_active)
2843 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2844
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002845 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002846 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002847}
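
/*
 * Illustrative sketch (editor's addition, not part of kvm-s390.c): a
 * minimal userspace loop around the ioctl above; vcpu_fd and the
 * mmap()ed kvm_run structure are assumed to exist.
 */
static void example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;				/* e.g. EINTR */
		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
			break;	/* icptcode/ipa/ipb filled by vcpu_post_run() */
	}
}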
2848
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002849/*
2850 * store status at address
 2851	 * we have two special cases:
2852 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2853 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2854 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002855int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002856{
Carsten Otte092670c2011-07-24 10:48:22 +02002857 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002858 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02002859 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01002860 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002861 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002862
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002863 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002864 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2865 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002866 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002867 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002868 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2869 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002870 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002871 gpa = px;
2872 } else
2873 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002874
2875 /* manually convert vector registers if necessary */
2876 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01002877 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002878 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2879 fprs, 128);
2880 } else {
2881 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002882 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002883 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002884 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002885 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002886 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002887 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002888 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02002889 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002890 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002891 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002892 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002893 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01002894 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002895 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01002896 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002897 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002898 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002899 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002900 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002901 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002902 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002903 &vcpu->arch.sie_block->gcr, 128);
2904 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002905}
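
/*
 * Editor's summary (derived from the writes above, not original text):
 * relative to __LC_FPREGS_SAVE_AREA the save area holds fprs (128 bytes),
 * gprs (128), psw (16), prefix (4), fpc (4), todpr (4), cpu timer (8),
 * clock comparator (8), acrs (64) and crs (128).
 */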
2906
Thomas Huthe8798922013-11-06 15:46:33 +01002907int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2908{
2909 /*
2910 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01002911 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01002912	 * them into the save area
2913 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002914 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002915 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01002916 save_access_regs(vcpu->run->s.regs.acrs);
2917
2918 return kvm_s390_store_status_unloaded(vcpu, addr);
2919}
2920
David Hildenbrand8ad35752014-03-14 11:00:21 +01002921static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2922{
2923 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002924 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002925}
2926
2927static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2928{
2929 unsigned int i;
2930 struct kvm_vcpu *vcpu;
2931
2932 kvm_for_each_vcpu(i, vcpu, kvm) {
2933 __disable_ibs_on_vcpu(vcpu);
2934 }
2935}
2936
2937static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2938{
David Hildenbrand09a400e2016-04-04 15:57:08 +02002939 if (!sclp.has_ibs)
2940 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002941 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002942 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002943}
2944
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002945void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2946{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002947 int i, online_vcpus, started_vcpus = 0;
2948
2949 if (!is_vcpu_stopped(vcpu))
2950 return;
2951
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002952 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002953 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002954 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002955 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2956
2957 for (i = 0; i < online_vcpus; i++) {
2958 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2959 started_vcpus++;
2960 }
2961
2962 if (started_vcpus == 0) {
2963 /* we're the only active VCPU -> speed it up */
2964 __enable_ibs_on_vcpu(vcpu);
2965 } else if (started_vcpus == 1) {
2966 /*
2967 * As we are starting a second VCPU, we have to disable
2968 * the IBS facility on all VCPUs to remove potentially
 2969	 * outstanding ENABLE requests.
2970 */
2971 __disable_ibs_on_all_vcpus(vcpu->kvm);
2972 }
2973
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002974 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002975 /*
2976 * Another VCPU might have used IBS while we were offline.
2977 * Let's play safe and flush the VCPU at startup.
2978 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002979 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002980 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002981 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002982}
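
/*
 * Editor's note (not part of the original source): together with
 * kvm_s390_vcpu_stop() below this forms a small IBS state machine: with
 * exactly one started vcpu IBS is enabled on it, starting a second vcpu
 * disables IBS everywhere, and stopping back down to one re-enables it
 * on the survivor.
 */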
2983
2984void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2985{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002986 int i, online_vcpus, started_vcpus = 0;
2987 struct kvm_vcpu *started_vcpu = NULL;
2988
2989 if (is_vcpu_stopped(vcpu))
2990 return;
2991
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002992 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002993 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002994 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002995 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2996
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002997 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002998 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002999
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003000 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003001 __disable_ibs_on_vcpu(vcpu);
3002
3003 for (i = 0; i < online_vcpus; i++) {
3004 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3005 started_vcpus++;
3006 started_vcpu = vcpu->kvm->vcpus[i];
3007 }
3008 }
3009
3010 if (started_vcpus == 1) {
3011 /*
3012 * As we only have one VCPU left, we want to enable the
3013 * IBS facility for that VCPU to speed it up.
3014 */
3015 __enable_ibs_on_vcpu(started_vcpu);
3016 }
3017
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003018 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003019 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003020}
3021
Cornelia Huckd6712df2012-12-20 15:32:11 +01003022static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3023 struct kvm_enable_cap *cap)
3024{
3025 int r;
3026
3027 if (cap->flags)
3028 return -EINVAL;
3029
3030 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003031 case KVM_CAP_S390_CSS_SUPPORT:
3032 if (!vcpu->kvm->arch.css_support) {
3033 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003034 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003035 trace_kvm_s390_enable_css(vcpu->kvm);
3036 }
3037 r = 0;
3038 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003039 default:
3040 r = -EINVAL;
3041 break;
3042 }
3043 return r;
3044}
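
/*
 * Illustrative sketch (editor's addition, not part of kvm-s390.c):
 * enabling the capability handled above from userspace.
 */
static int example_enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);	/* flags must be 0 */
}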
3045
Thomas Huth41408c22015-02-06 15:01:21 +01003046static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3047 struct kvm_s390_mem_op *mop)
3048{
3049 void __user *uaddr = (void __user *)mop->buf;
3050 void *tmpbuf = NULL;
3051 int r, srcu_idx;
3052 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3053 | KVM_S390_MEMOP_F_CHECK_ONLY;
3054
3055 if (mop->flags & ~supported_flags)
3056 return -EINVAL;
3057
3058 if (mop->size > MEM_OP_MAX_SIZE)
3059 return -E2BIG;
3060
3061 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3062 tmpbuf = vmalloc(mop->size);
3063 if (!tmpbuf)
3064 return -ENOMEM;
3065 }
3066
3067 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3068
3069 switch (mop->op) {
3070 case KVM_S390_MEMOP_LOGICAL_READ:
3071 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003072 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3073 mop->size, GACC_FETCH);
Thomas Huth41408c22015-02-06 15:01:21 +01003074 break;
3075 }
3076 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3077 if (r == 0) {
3078 if (copy_to_user(uaddr, tmpbuf, mop->size))
3079 r = -EFAULT;
3080 }
3081 break;
3082 case KVM_S390_MEMOP_LOGICAL_WRITE:
3083 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003084 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3085 mop->size, GACC_STORE);
Thomas Huth41408c22015-02-06 15:01:21 +01003086 break;
3087 }
3088 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3089 r = -EFAULT;
3090 break;
3091 }
3092 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3093 break;
3094 default:
3095 r = -EINVAL;
3096 }
3097
3098 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3099
3100 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3101 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3102
3103 vfree(tmpbuf);
3104 return r;
3105}
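
/*
 * Illustrative sketch (editor's addition, not part of kvm-s390.c):
 * reading 256 bytes of guest logical memory through the handler above;
 * vcpu_fd and buf are assumed to exist.
 */
static int example_read_guest(int vcpu_fd, void *buf)
{
	struct kvm_s390_mem_op op = {
		.gaddr = 0x1000,		/* guest logical address */
		.size  = 256,			/* <= MEM_OP_MAX_SIZE */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,			/* access register number */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}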
3106
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003107long kvm_arch_vcpu_ioctl(struct file *filp,
3108 unsigned int ioctl, unsigned long arg)
3109{
3110 struct kvm_vcpu *vcpu = filp->private_data;
3111 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003112 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003113 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003114
Avi Kivity93736622010-05-13 12:35:17 +03003115 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003116 case KVM_S390_IRQ: {
3117 struct kvm_s390_irq s390irq;
3118
3119 r = -EFAULT;
3120 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3121 break;
3122 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3123 break;
3124 }
Avi Kivity93736622010-05-13 12:35:17 +03003125 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003126 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003127 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003128
Avi Kivity93736622010-05-13 12:35:17 +03003129 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003130 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003131 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003132 if (s390int_to_s390irq(&s390int, &s390irq))
3133 return -EINVAL;
3134 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003135 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003136 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003137 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003138 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003139 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003140 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003141 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003142 case KVM_S390_SET_INITIAL_PSW: {
3143 psw_t psw;
3144
Avi Kivitybc923cc2010-05-13 12:21:46 +03003145 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003146 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003147 break;
3148 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3149 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003150 }
3151 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003152 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3153 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003154 case KVM_SET_ONE_REG:
3155 case KVM_GET_ONE_REG: {
3156 struct kvm_one_reg reg;
3157 r = -EFAULT;
3158 if (copy_from_user(&reg, argp, sizeof(reg)))
3159 break;
3160 if (ioctl == KVM_SET_ONE_REG)
3161 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3162 else
3163 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3164 break;
3165 }
Carsten Otte27e03932012-01-04 10:25:21 +01003166#ifdef CONFIG_KVM_S390_UCONTROL
3167 case KVM_S390_UCAS_MAP: {
3168 struct kvm_s390_ucas_mapping ucasmap;
3169
3170 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3171 r = -EFAULT;
3172 break;
3173 }
3174
3175 if (!kvm_is_ucontrol(vcpu->kvm)) {
3176 r = -EINVAL;
3177 break;
3178 }
3179
3180 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3181 ucasmap.vcpu_addr, ucasmap.length);
3182 break;
3183 }
3184 case KVM_S390_UCAS_UNMAP: {
3185 struct kvm_s390_ucas_mapping ucasmap;
3186
3187 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3188 r = -EFAULT;
3189 break;
3190 }
3191
3192 if (!kvm_is_ucontrol(vcpu->kvm)) {
3193 r = -EINVAL;
3194 break;
3195 }
3196
3197 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3198 ucasmap.length);
3199 break;
3200 }
3201#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003202 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003203 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003204 break;
3205 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003206 case KVM_ENABLE_CAP:
3207 {
3208 struct kvm_enable_cap cap;
3209 r = -EFAULT;
3210 if (copy_from_user(&cap, argp, sizeof(cap)))
3211 break;
3212 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3213 break;
3214 }
Thomas Huth41408c22015-02-06 15:01:21 +01003215 case KVM_S390_MEM_OP: {
3216 struct kvm_s390_mem_op mem_op;
3217
3218 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3219 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3220 else
3221 r = -EFAULT;
3222 break;
3223 }
Jens Freimann816c7662014-11-24 17:13:46 +01003224 case KVM_S390_SET_IRQ_STATE: {
3225 struct kvm_s390_irq_state irq_state;
3226
3227 r = -EFAULT;
3228 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3229 break;
3230 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3231 irq_state.len == 0 ||
3232 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3233 r = -EINVAL;
3234 break;
3235 }
3236 r = kvm_s390_set_irq_state(vcpu,
3237 (void __user *) irq_state.buf,
3238 irq_state.len);
3239 break;
3240 }
3241 case KVM_S390_GET_IRQ_STATE: {
3242 struct kvm_s390_irq_state irq_state;
3243
3244 r = -EFAULT;
3245 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3246 break;
3247 if (irq_state.len == 0) {
3248 r = -EINVAL;
3249 break;
3250 }
3251 r = kvm_s390_get_irq_state(vcpu,
3252 (__u8 __user *) irq_state.buf,
3253 irq_state.len);
3254 break;
3255 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003256 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003257 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003258 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003259 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003260}
3261
Carsten Otte5b1c1492012-01-04 10:25:23 +01003262int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3263{
3264#ifdef CONFIG_KVM_S390_UCONTROL
3265 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3266 && (kvm_is_ucontrol(vcpu->kvm))) {
3267 vmf->page = virt_to_page(vcpu->arch.sie_block);
3268 get_page(vmf->page);
3269 return 0;
3270 }
3271#endif
3272 return VM_FAULT_SIGBUS;
3273}
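
/*
 * Editor's note (not part of the original source): for ucontrol guests
 * this lets userspace map the SIE control block, roughly:
 *
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	     vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */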
3274
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303275int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3276 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003277{
3278 return 0;
3279}
3280
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003281/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003282int kvm_arch_prepare_memory_region(struct kvm *kvm,
3283 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003284 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003285 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003286{
Nick Wangdd2887e2013-03-25 17:22:57 +01003287	/* A few sanity checks. Memory slots have to start and end at a
 3288	   segment boundary (1MB). The memory in userland may be fragmented
 3289	   into various different vmas. It is okay to mmap() and munmap()
 3290	   stuff in this slot at any time after this call */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003291
Carsten Otte598841c2011-07-24 10:48:21 +02003292 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003293 return -EINVAL;
3294
Carsten Otte598841c2011-07-24 10:48:21 +02003295 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003296 return -EINVAL;
3297
Dominik Dingela3a92c32014-12-01 17:24:42 +01003298 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3299 return -EINVAL;
3300
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003301 return 0;
3302}
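
/*
 * Editor's note (not part of the original source): the 0xfffff masks
 * cover bits 0-19, so both userspace_addr and memory_size must be
 * multiples of 1MB (2^20), matching the segment-boundary rule in the
 * comment above.
 */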
3303
3304void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003305 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003306 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003307 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003308 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003309{
Carsten Ottef7850c92011-07-24 10:48:23 +02003310 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003311
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003312 /* If the basics of the memslot do not change, we do not want
3313 * to update the gmap. Every update causes several unnecessary
3314 * segment translation exceptions. This is usually handled just
3315 * fine by the normal fault handler + gmap, but it will also
3316 * cause faults on the prefix page of running guest CPUs.
3317 */
3318 if (old->userspace_addr == mem->userspace_addr &&
3319 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3320 old->npages * PAGE_SIZE == mem->memory_size)
3321 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003322
3323 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3324 mem->guest_phys_addr, mem->memory_size);
3325 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003326 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003327 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003328}
3329
Alexander Yarygin60a37702016-04-01 15:38:57 +03003330static inline unsigned long nonhyp_mask(int i)
3331{
3332 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3333
3334 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3335}
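
/*
 * Editor's note (not part of the original source): (hmfai << i * 2) >> 30
 * extracts the i-th 2-bit field of sclp.hmfai from the most significant
 * end, and the 48-bit mask is then shifted right by 0, 16, 32 or 48
 * bits. E.g. a field value of 3 hides all facility bits of that word.
 */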
3336
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003337void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3338{
3339 vcpu->valid_wakeup = false;
3340}
3341
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003342static int __init kvm_s390_init(void)
3343{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003344 int i;
3345
David Hildenbrand07197fd2015-01-30 16:01:38 +01003346 if (!sclp.has_sief2) {
3347 pr_info("SIE not available\n");
3348 return -ENODEV;
3349 }
3350
Alexander Yarygin60a37702016-04-01 15:38:57 +03003351 for (i = 0; i < 16; i++)
3352 kvm_s390_fac_list_mask[i] |=
3353 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3354
Michael Mueller9d8d5782015-02-02 15:42:51 +01003355 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003356}
3357
3358static void __exit kvm_s390_exit(void)
3359{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003360 kvm_exit();
3361}
3362
3363module_init(kvm_s390_init);
3364module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003365
3366/*
3367 * Enable autoloading of the kvm module.
3368 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3369 * since x86 takes a different approach.
3370 */
3371#include <linux/miscdevice.h>
3372MODULE_ALIAS_MISCDEV(KVM_MINOR);
3373MODULE_ALIAS("devname:kvm");