blob: ce9813afd5028086e797c4e58a06c3e1b2e8bab2 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +010024#include <linux/mman.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010025#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020026#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010027#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010028#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010029#include <linux/vmalloc.h>
David Hildenbrand15c97052015-03-19 17:36:43 +010030#include <linux/bitmap.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010031#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010032#include <asm/lowcore.h>
Fan Zhangfdf03652015-05-13 10:58:41 +020033#include <asm/etr.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010034#include <asm/pgtable.h>
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +010035#include <asm/gmap.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010036#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010037#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020038#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020039#include <asm/sclp.h>
David Hildenbrand0a763c72016-05-18 16:03:47 +020040#include <asm/cpacf.h>
41#include <asm/etr.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010042#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010043#include "gaccess.h"
44
David Hildenbrandea2cdd22015-05-20 13:24:02 +020045#define KMSG_COMPONENT "kvm-s390"
46#undef pr_fmt
47#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
48
Cornelia Huck5786fff2012-07-23 17:20:29 +020049#define CREATE_TRACE_POINTS
50#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020051#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020052
Thomas Huth41408c282015-02-06 15:01:21 +010053#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010054#define LOCAL_IRQS 32
55#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
56 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010057
Heiko Carstensb0c632d2008-03-25 18:47:20 +010058#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
59
/*
 * Per-VCPU statistics counters exported via debugfs.  Each entry maps a
 * debugfs file name to the offset of a counter in struct kvm_vcpu.stat;
 * the table is terminated by a NULL sentinel entry.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
127
/*
 * Upper limit on the facility bits KVM offers to guests.  Only the first
 * two 64-bit words carry non-zero masks; the remaining array entries are
 * implicitly zero, i.e. no facilities beyond bit 127 are exposed.
 */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100133
/*
 * Return the number of 64-bit words in kvm_s390_fac_list_mask.  The
 * BUILD_BUG_ON guarantees at compile time that the mask never grows
 * beyond the architectural facility-mask size.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
139
David Hildenbrand15c97052015-03-19 17:36:43 +0100140/* available cpu features supported by kvm */
141static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200142/* available subfunctions indicated via query / "test bit" */
143static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
David Hildenbrand15c97052015-03-19 17:36:43 +0100144
Michael Mueller9d8d5782015-02-02 15:42:51 +0100145static struct gmap_notifier gmap_notifier;
David Hildenbranda3508fb2015-07-08 13:19:48 +0200146static struct gmap_notifier vsie_gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200147debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100148
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100149/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/*
	 * On s390 there is no per-CPU enablement step: every CPU can run
	 * guests via SIE, so this hook always succeeds.
	 */
	return 0;
}
155
Martin Schwidefsky414d3b02016-03-08 11:52:54 +0100156static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
157 unsigned long end);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200158
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;	/* TOD clock delta passed by the chain */

	/* Apply the delta to every VM and every VCPU of each VM. */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			/* keep the accounted CPU timer base consistent */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}
183
/* Hooked into the epoch-delta notifier chain; see kvm_clock_sync(). */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
187
/*
 * One-time hardware setup: register the gmap PTE notifiers (for regular
 * guests and for VSIE shadow gmaps) and the TOD epoch delta notifier.
 * Always succeeds.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
198
/* Undo kvm_arch_hardware_setup(): unregister all three notifiers. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
206
/* Mark CPU feature @nr (KVM_S390_VM_CPU_FEAT_*) as available to guests. */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}
211
/*
 * Query availability of PERFORM LOCKED OPERATION subfunction @nr via the
 * instruction's "test bit" mode (bit 0x100 set in r0).  Returns nonzero
 * when the subfunction is installed (condition code 0).
 */
static inline int plo_test_bit(unsigned char nr)
{
	/* function code in r0; 0x100 selects test-bit mode */
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
227
/*
 * Probe the host for the CPU subfunctions (PLO, PTFF, CPACF crypto
 * queries) and CPU features that KVM can offer to guests, recording the
 * results in kvm_s390_available_subfunc / kvm_s390_available_cpu_feat.
 * Called once from kvm_arch_init().
 */
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	/* Record which of the 256 possible PLO subfunctions exist. */
	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3))
		return;
	/* The remaining features all depend on SIEF2 support above. */
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
}
277
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100278int kvm_arch_init(void *opaque)
279{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200280 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
281 if (!kvm_s390_dbf)
282 return -ENOMEM;
283
284 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
285 debug_unregister(kvm_s390_dbf);
286 return -ENOMEM;
287 }
288
David Hildenbrand22be5a12016-01-21 13:22:54 +0100289 kvm_s390_cpu_feat_init();
290
Cornelia Huck84877d92014-09-02 10:27:35 +0100291 /* Register floating interrupt controller interface. */
292 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100293}
294
/* Tear down what kvm_arch_init() set up: the "kvm-trace" debug feature. */
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
299
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100300/* Section: device related */
301long kvm_arch_dev_ioctl(struct file *filp,
302 unsigned int ioctl, unsigned long arg)
303{
304 if (ioctl == KVM_S390_ENABLE_SIE)
305 return s390_enable_sie();
306 return -EINVAL;
307}
308
/*
 * KVM_CHECK_EXTENSION handler: report whether a capability is supported
 * and, for sized capabilities, its limit.  Returns 0 for unknown
 * extensions, 1 for plain boolean capabilities, or a capability-specific
 * value (e.g. maximum transfer size, VCPU slot count).
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* Capabilities that are unconditionally available: */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		/* extended SCA allows more VCPU slots when supported */
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation requires facility 64 */
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
364
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400365static void kvm_s390_sync_dirty_log(struct kvm *kvm,
366 struct kvm_memory_slot *memslot)
367{
368 gfn_t cur_gfn, last_gfn;
369 unsigned long address;
370 struct gmap *gmap = kvm->arch.gmap;
371
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400372 /* Loop over all guest pages */
373 last_gfn = memslot->base_gfn + memslot->npages;
374 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
375 address = gfn_to_hva_memslot(memslot, cur_gfn);
376
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +0100377 if (test_and_clear_guest_dirty(gmap->mm, address))
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400378 mark_page_dirty(kvm, cur_gfn);
Christian Borntraeger1763f8d2016-02-03 11:12:34 +0100379 if (fatal_signal_pending(current))
380 return;
Christian Borntraeger70c88a02016-02-02 15:15:56 +0100381 cond_resched();
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400382 }
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400383}
384
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100385/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200386static void sca_del_vcpu(struct kvm_vcpu *vcpu);
387
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Syncs dirty bits from the host page tables first, copies the bitmap to
 * userspace via kvm_get_dirty_log(), then clears it.  Returns 0 on
 * success, -EINVAL for a bad slot index, -ENOENT if dirty logging is not
 * enabled on the slot.  Serialized by kvm->slots_lock.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty state from the host page tables into the bitmap */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
427
/*
 * KVM_ENABLE_CAP (VM scope): turn on an optional capability for this VM.
 * Capabilities that change the guest CPU model (vector registers, RI)
 * may only be enabled before any VCPU has been created and are therefore
 * guarded by kvm->lock.  Returns 0 on success, -EINVAL for unknown caps
 * or non-zero flags, -EBUSY if VCPUs already exist.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* cpu model change: only valid before VCPU creation */
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation: only valid before VCPU creation */
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
485
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100486static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
487{
488 int ret;
489
490 switch (attr->attr) {
491 case KVM_S390_VM_MEM_LIMIT_SIZE:
492 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200493 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
Dominik Dingela3a92c32014-12-01 17:24:42 +0100494 kvm->arch.mem_limit);
495 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100496 ret = -EFAULT;
497 break;
498 default:
499 ret = -ENXIO;
500 break;
501 }
502 return ret;
503}
504
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute: enable CMMA, reset CMMA state,
 * or change the guest memory limit (the latter recreates the gmap and is
 * only allowed before any VCPU exists).  Returns 0 on success or a
 * negative error code (-ENXIO, -EINVAL, -EBUSY, -EFAULT, -E2BIG, -ENOMEM).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		/* CMMA can only be enabled before the first VCPU exists */
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot updates */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit may shrink but never grow beyond the initial one */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* replacing the gmap is only safe before VCPUs exist */
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
587
Tony Krowiaka374e892014-09-03 10:13:53 +0200588static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
589
/*
 * Set a KVM_S390_VM_CRYPTO attribute: enable (with freshly generated
 * random wrapping keys) or disable AES/DEA key wrapping in the VM's
 * crypto control block.  Requires the MSA3 facility (76).  All VCPUs are
 * kicked out of SIE afterwards so the new crypto setup takes effect.
 * Returns 0 on success, -EINVAL without facility 76, -ENXIO for unknown
 * attributes.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		/* clear the old wrapping key so it cannot leak */
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		/* clear the old wrapping key so it cannot leak */
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* force every VCPU to reload its crypto setup */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
638
Jason J. Herne72f25022014-11-25 09:46:02 -0500639static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
640{
641 u8 gtod_high;
642
643 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
644 sizeof(gtod_high)))
645 return -EFAULT;
646
647 if (gtod_high != 0)
648 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200649 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500650
651 return 0;
652}
653
654static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
655{
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200656 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500657
658 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
659 return -EFAULT;
660
David Hildenbrand25ed1672015-05-12 09:49:14 +0200661 kvm_s390_set_tod_clock(kvm, gtod);
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200662 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500663 return 0;
664}
665
666static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
667{
668 int ret;
669
670 if (attr->flags)
671 return -EINVAL;
672
673 switch (attr->attr) {
674 case KVM_S390_VM_TOD_HIGH:
675 ret = kvm_s390_set_tod_high(kvm, attr);
676 break;
677 case KVM_S390_VM_TOD_LOW:
678 ret = kvm_s390_set_tod_low(kvm, attr);
679 break;
680 default:
681 ret = -ENXIO;
682 break;
683 }
684 return ret;
685}
686
687static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
688{
689 u8 gtod_high = 0;
690
691 if (copy_to_user((void __user *)attr->addr, &gtod_high,
692 sizeof(gtod_high)))
693 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200694 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500695
696 return 0;
697}
698
699static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
700{
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200701 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500702
David Hildenbrand60417fc2015-09-29 16:20:36 +0200703 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -0500704 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
705 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200706 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500707
708 return 0;
709}
710
711static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
712{
713 int ret;
714
715 if (attr->flags)
716 return -EINVAL;
717
718 switch (attr->attr) {
719 case KVM_S390_VM_TOD_HIGH:
720 ret = kvm_s390_get_tod_high(kvm, attr);
721 break;
722 case KVM_S390_VM_TOD_LOW:
723 ret = kvm_s390_get_tod_low(kvm, attr);
724 break;
725 default:
726 ret = -ENXIO;
727 break;
728 }
729 return ret;
730}
731
/*
 * Set the guest-visible CPU model (cpuid, IBC value, facility list) from
 * user space. Only allowed while no vcpu has been created yet, since
 * vcpus sample the model during setup.
 *
 * Returns 0 on success, -EBUSY if vcpus already exist, -ENOMEM or
 * -EFAULT on allocation/copy failure.
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		/* sclp.ibc holds lowest (bits 16-27) and unblocked (bits 0-11) IBC */
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		/* clamp the requested IBC into [lowest, unblocked] if IBC is active */
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
770
David Hildenbrand15c97052015-03-19 17:36:43 +0100771static int kvm_s390_set_processor_feat(struct kvm *kvm,
772 struct kvm_device_attr *attr)
773{
774 struct kvm_s390_vm_cpu_feat data;
775 int ret = -EBUSY;
776
777 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
778 return -EFAULT;
779 if (!bitmap_subset((unsigned long *) data.feat,
780 kvm_s390_available_cpu_feat,
781 KVM_S390_VM_CPU_FEAT_NR_BITS))
782 return -EINVAL;
783
784 mutex_lock(&kvm->lock);
785 if (!atomic_read(&kvm->online_vcpus)) {
786 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
787 KVM_S390_VM_CPU_FEAT_NR_BITS);
788 ret = 0;
789 }
790 mutex_unlock(&kvm->lock);
791 return ret;
792}
793
/*
 * Stub: configuring CPU subfunctions from user space is not supported
 * yet, so report -ENXIO (consistent with kvm_s390_vm_has_attr()).
 */
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}
803
Michael Mueller658b6ed2015-02-02 15:49:35 +0100804static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
805{
806 int ret = -ENXIO;
807
808 switch (attr->attr) {
809 case KVM_S390_VM_CPU_PROCESSOR:
810 ret = kvm_s390_set_processor(kvm, attr);
811 break;
David Hildenbrand15c97052015-03-19 17:36:43 +0100812 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
813 ret = kvm_s390_set_processor_feat(kvm, attr);
814 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +0200815 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
816 ret = kvm_s390_set_processor_subfunc(kvm, attr);
817 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100818 }
819 return ret;
820}
821
822static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
823{
824 struct kvm_s390_vm_cpu_processor *proc;
825 int ret = 0;
826
827 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
828 if (!proc) {
829 ret = -ENOMEM;
830 goto out;
831 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +0200832 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100833 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100834 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
835 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100836 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
837 ret = -EFAULT;
838 kfree(proc);
839out:
840 return ret;
841}
842
843static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
844{
845 struct kvm_s390_vm_cpu_machine *mach;
846 int ret = 0;
847
848 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
849 if (!mach) {
850 ret = -ENOMEM;
851 goto out;
852 }
853 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +0200854 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100855 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +0100856 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100857 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +0100858 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100859 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
860 ret = -EFAULT;
861 kfree(mach);
862out:
863 return ret;
864}
865
David Hildenbrand15c97052015-03-19 17:36:43 +0100866static int kvm_s390_get_processor_feat(struct kvm *kvm,
867 struct kvm_device_attr *attr)
868{
869 struct kvm_s390_vm_cpu_feat data;
870
871 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
872 KVM_S390_VM_CPU_FEAT_NR_BITS);
873 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
874 return -EFAULT;
875 return 0;
876}
877
878static int kvm_s390_get_machine_feat(struct kvm *kvm,
879 struct kvm_device_attr *attr)
880{
881 struct kvm_s390_vm_cpu_feat data;
882
883 bitmap_copy((unsigned long *) data.feat,
884 kvm_s390_available_cpu_feat,
885 KVM_S390_VM_CPU_FEAT_NR_BITS);
886 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
887 return -EFAULT;
888 return 0;
889}
890
/*
 * Stub: per-VM configured subfunctions cannot be queried yet, so report
 * -ENXIO (consistent with kvm_s390_vm_has_attr()).
 */
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}
901
902static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
903 struct kvm_device_attr *attr)
904{
905 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
906 sizeof(struct kvm_s390_vm_cpu_subfunc)))
907 return -EFAULT;
908 return 0;
909}
Michael Mueller658b6ed2015-02-02 15:49:35 +0100910static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
911{
912 int ret = -ENXIO;
913
914 switch (attr->attr) {
915 case KVM_S390_VM_CPU_PROCESSOR:
916 ret = kvm_s390_get_processor(kvm, attr);
917 break;
918 case KVM_S390_VM_CPU_MACHINE:
919 ret = kvm_s390_get_machine(kvm, attr);
920 break;
David Hildenbrand15c97052015-03-19 17:36:43 +0100921 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
922 ret = kvm_s390_get_processor_feat(kvm, attr);
923 break;
924 case KVM_S390_VM_CPU_MACHINE_FEAT:
925 ret = kvm_s390_get_machine_feat(kvm, attr);
926 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +0200927 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
928 ret = kvm_s390_get_processor_subfunc(kvm, attr);
929 break;
930 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
931 ret = kvm_s390_get_machine_subfunc(kvm, attr);
932 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100933 }
934 return ret;
935}
936
Dominik Dingelf2061652014-04-09 13:13:00 +0200937static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
938{
939 int ret;
940
941 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200942 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100943 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200944 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500945 case KVM_S390_VM_TOD:
946 ret = kvm_s390_set_tod(kvm, attr);
947 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100948 case KVM_S390_VM_CPU_MODEL:
949 ret = kvm_s390_set_cpu_model(kvm, attr);
950 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200951 case KVM_S390_VM_CRYPTO:
952 ret = kvm_s390_vm_set_crypto(kvm, attr);
953 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200954 default:
955 ret = -ENXIO;
956 break;
957 }
958
959 return ret;
960}
961
962static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
963{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100964 int ret;
965
966 switch (attr->group) {
967 case KVM_S390_VM_MEM_CTRL:
968 ret = kvm_s390_get_mem_control(kvm, attr);
969 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500970 case KVM_S390_VM_TOD:
971 ret = kvm_s390_get_tod(kvm, attr);
972 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100973 case KVM_S390_VM_CPU_MODEL:
974 ret = kvm_s390_get_cpu_model(kvm, attr);
975 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100976 default:
977 ret = -ENXIO;
978 break;
979 }
980
981 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200982}
983
984static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
985{
986 int ret;
987
988 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200989 case KVM_S390_VM_MEM_CTRL:
990 switch (attr->attr) {
991 case KVM_S390_VM_MEM_ENABLE_CMMA:
992 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100993 ret = sclp.has_cmma ? 0 : -ENXIO;
994 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100995 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200996 ret = 0;
997 break;
998 default:
999 ret = -ENXIO;
1000 break;
1001 }
1002 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001003 case KVM_S390_VM_TOD:
1004 switch (attr->attr) {
1005 case KVM_S390_VM_TOD_LOW:
1006 case KVM_S390_VM_TOD_HIGH:
1007 ret = 0;
1008 break;
1009 default:
1010 ret = -ENXIO;
1011 break;
1012 }
1013 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001014 case KVM_S390_VM_CPU_MODEL:
1015 switch (attr->attr) {
1016 case KVM_S390_VM_CPU_PROCESSOR:
1017 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001018 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1019 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001020 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001021 ret = 0;
1022 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001023 /* configuring subfunctions is not supported yet */
1024 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001025 default:
1026 ret = -ENXIO;
1027 break;
1028 }
1029 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001030 case KVM_S390_VM_CRYPTO:
1031 switch (attr->attr) {
1032 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1033 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1034 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1035 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1036 ret = 0;
1037 break;
1038 default:
1039 ret = -ENXIO;
1040 break;
1041 }
1042 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001043 default:
1044 ret = -ENXIO;
1045 break;
1046 }
1047
1048 return ret;
1049}
1050
/*
 * KVM_S390_GET_SKEYS: read the guest storage keys for args->count pages
 * starting at args->start_gfn into a kernel buffer and copy them to user
 * space at args->skeydata_addr.
 *
 * Returns 0 on success, KVM_S390_GET_SKEYS_NONE if the guest does not
 * use storage keys, or a negative error code.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	/* walk the guest pages with the address space protected */
	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	/* copy to user space only after dropping mmap_sem */
	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
1099
/*
 * KVM_S390_SET_SKEYS: copy args->count storage keys from user space and
 * apply them to the guest pages starting at args->start_gfn. Enables
 * storage key handling for the guest if not yet active.
 *
 * Returns 0 on success or a negative error code; keys already written
 * before a failure remain applied.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
1155
/*
 * Top-level VM ioctl dispatcher for s390: copy the per-ioctl argument
 * structure from user space and hand it to the matching handler.
 * Returns the handler's result, -EFAULT on a bad user pointer, or
 * -ENOTTY for unknown ioctls.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid if the user space irqchip was enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
1240
/*
 * Execute PQAP(QCI) to query the AP crypto configuration into the
 * 128-byte buffer @config (zeroed first; caller must provide at least
 * 128 bytes). Returns the instruction's condition code; an exception
 * (e.g. PQAP not available) is fixed up via the extable and leaves
 * cc = 0 with a zeroed buffer.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* function code 4 = QCI, passed in register 0 */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
1262
1263static int kvm_s390_apxa_installed(void)
1264{
1265 u8 config[128];
1266 int cc;
1267
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001268 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001269 cc = kvm_s390_query_ap_config(config);
1270
1271 if (cc)
1272 pr_err("PQAP(QCI) failed with cc=%d", cc);
1273 else
1274 return config[0] & 0x40;
1275 }
1276
1277 return 0;
1278}
1279
1280static void kvm_s390_set_crycb_format(struct kvm *kvm)
1281{
1282 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1283
1284 if (kvm_s390_apxa_installed())
1285 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1286 else
1287 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1288}
1289
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001290static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001291{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001292 struct cpuid cpuid;
1293
1294 get_cpu_id(&cpuid);
1295 cpuid.version = 0xff;
1296 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001297}
1298
/*
 * Initialize the per-VM crypto state: hook up the crycb from sie_page2,
 * set its format, and enable AES/DEA key wrapping with freshly generated
 * random wrapping key masks. No-op without facility 76.
 */
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
1315
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001316static void sca_dispose(struct kvm *kvm)
1317{
1318 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001319 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001320 else
1321 free_page((unsigned long)(kvm->arch.sca));
1322 kvm->arch.sca = NULL;
1323}
1324
/*
 * Architecture-specific VM creation: validate the VM type, enable SIE
 * for the process, allocate the SCA, debug feature area and sie_page2,
 * initialize the CPU model (facility mask/list, cpuid, IBC), crypto
 * state, floating interrupt lists and - for non-ucontrol VMs - the
 * guest address space (gmap).
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially set up resources are released (the free routines tolerate
 * NULL/unset members).
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	/* staggers SCA placement across VMs, protected by kvm_lock */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* user controlled VMs are restricted to CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Place the SCA at a different offset within its page for each VM;
	 * presumably to spread cache usage across VMs - TODO confirm.
	 */
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs: user space manages the address space itself */
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
1441
/*
 * Tear down a single vcpu: drop pending local interrupts and async page
 * faults, detach it from the SCA (non-ucontrol VMs) or remove its private
 * gmap (ucontrol VMs), release CMMA state and the SIE control block, then
 * free the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	/* ucontrol VMs carry a per-vcpu gmap instead of a per-VM one */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
1461
/*
 * Destroy every vcpu of the VM, then clear the vcpu array and the online
 * counter under kvm->lock so no stale pointers remain visible.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
1477
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001478void kvm_arch_destroy_vm(struct kvm *kvm)
1479{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001480 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001481 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001482 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001483 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01001484 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001485 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001486 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001487 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001488 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001489 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001490}
1491
1492/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001493static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1494{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001495 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01001496 if (!vcpu->arch.gmap)
1497 return -ENOMEM;
1498 vcpu->arch.gmap->private = vcpu->kvm;
1499
1500 return 0;
1501}
1502
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001503static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1504{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001505 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001506 if (vcpu->kvm->arch.use_esca) {
1507 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001508
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001509 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001510 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001511 } else {
1512 struct bsca_block *sca = vcpu->kvm->arch.sca;
1513
1514 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001515 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001516 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001517 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001518}
1519
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001520static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001521{
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001522 read_lock(&vcpu->kvm->arch.sca_lock);
1523 if (vcpu->kvm->arch.use_esca) {
1524 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001525
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001526 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001527 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1528 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand25508822015-10-12 16:27:23 +02001529 vcpu->arch.sie_block->ecb2 |= 0x04U;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001530 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001531 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001532 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001533
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001534 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001535 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1536 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001537 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001538 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001539 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001540}
1541
1542/* Basic SCA to Extended SCA data copy routines */
1543static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1544{
1545 d->sda = s->sda;
1546 d->sigp_ctrl.c = s->sigp_ctrl.c;
1547 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1548}
1549
1550static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1551{
1552 int i;
1553
1554 d->ipte_control = s->ipte_control;
1555 d->mcn[0] = s->mcn;
1556 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1557 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1558}
1559
/*
 * Replace the basic SCA by an extended SCA. All vcpus are blocked (out
 * of SIE) and sca_lock is held for writing while the contents are copied
 * and every SIE block is redirected to the new origin, so no cpu can use
 * the old SCA concurrently. Returns 0 on success, -ENOMEM if the esca
 * cannot be allocated.
 */
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	/* split the new origin into the high/low SIE block fields */
	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		/* NOTE(review): 0x04 in ecb2 appears to select esca format
		 * (also set in sca_add_vcpu) - confirm vs. docs */
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
1597
1598static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1599{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001600 int rc;
1601
1602 if (id < KVM_S390_BSCA_CPU_SLOTS)
1603 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001604 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001605 return false;
1606
1607 mutex_lock(&kvm->lock);
1608 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1609 mutex_unlock(&kvm->lock);
1610
1611 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001612}
1613
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001614int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1615{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001616 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1617 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001618 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1619 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001620 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001621 KVM_SYNC_CRS |
1622 KVM_SYNC_ARCH0 |
1623 KVM_SYNC_PFAULT;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001624 if (test_kvm_facility(vcpu->kvm, 64))
1625 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01001626 /* fprs can be synchronized via vrs, even if the guest has no vx. With
1627 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1628 */
1629 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04001630 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001631 else
1632 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001633
1634 if (kvm_is_ucontrol(vcpu->kvm))
1635 return __kvm_ucontrol_vcpu_init(vcpu);
1636
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001637 return 0;
1638}
1639
David Hildenbranddb0758b2016-02-15 09:42:25 +01001640/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
1641static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1642{
1643 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01001644 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001645 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01001646 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001647}
1648
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* accounting must be running (cputm_start != 0) */
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/* charge the elapsed TOD delta against the guest cpu timer */
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
1658
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
1666
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
1674
/* preemption-safe wrapper around __enable_cpu_timer_accounting() */
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
1681
/* preemption-safe wrapper around __disable_cpu_timer_accounting() */
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
1688
David Hildenbrand4287f242016-02-15 09:40:12 +01001689/* set the cpu timer - may only be called from the VCPU thread itself */
1690void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
1691{
David Hildenbranddb0758b2016-02-15 09:42:25 +01001692 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01001693 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001694 if (vcpu->arch.cputm_enabled)
1695 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01001696 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001697 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001698 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01001699}
1700
David Hildenbranddb0758b2016-02-15 09:42:25 +01001701/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01001702__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
1703{
David Hildenbrand9c23a132016-02-17 21:53:33 +01001704 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001705 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001706
1707 if (unlikely(!vcpu->arch.cputm_enabled))
1708 return vcpu->arch.sie_block->cputm;
1709
David Hildenbrand9c23a132016-02-17 21:53:33 +01001710 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1711 do {
1712 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
1713 /*
1714 * If the writer would ever execute a read in the critical
1715 * section, e.g. in irq context, we have a deadlock.
1716 */
1717 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1718 value = vcpu->arch.sie_block->cputm;
1719 /* if cputm_start is 0, accounting is being started/stopped */
1720 if (likely(vcpu->arch.cputm_start))
1721 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1722 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
1723 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01001724 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01001725}
1726
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001727void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1728{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001729 /* Save host register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001730 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001731 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
1732 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001733
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001734 if (MACHINE_HAS_VX)
1735 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
1736 else
1737 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001738 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001739 if (test_fp_ctl(current->thread.fpu.fpc))
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001740 /* User space provided an invalid FPC, let's clear it */
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001741 current->thread.fpu.fpc = 0;
1742
1743 save_access_regs(vcpu->arch.host_acrs);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001744 restore_access_regs(vcpu->run->s.regs.acrs);
David Hildenbrand37d9df92015-03-11 16:47:33 +01001745 gmap_enable(vcpu->arch.enabled_gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001746 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01001747 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01001748 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01001749 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001750}
1751
/*
 * Counterpart of kvm_arch_vcpu_load(): stop cpu timer accounting, leave
 * the guest address space and swap the FPU/access registers back to the
 * host's state.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* remember the active gmap so the next load re-enables it */
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1772
/*
 * Bring the vcpu into its architected initial state: zero PSW, prefix,
 * timers and control registers (with the documented reset values for
 * cr0/cr14), drop async pfault state and pending local interrupts.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* unless user space manages cpu states, enter stopped state here */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1796
Dominik Dingel31928aa2014-12-04 15:47:07 +01001797void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001798{
Jason J. Herne72f25022014-11-25 09:46:02 -05001799 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02001800 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001801 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02001802 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001803 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02001804 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01001805 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001806 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02001807 }
David Hildenbrand37d9df92015-03-11 16:47:33 +01001808 /* make vcpu_load load the right gmap on the first trigger */
1809 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001810}
1811
Tony Krowiak5102ee82014-06-27 14:46:01 -04001812static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1813{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001814 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001815 return;
1816
Tony Krowiaka374e892014-09-03 10:13:53 +02001817 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1818
1819 if (vcpu->kvm->arch.crypto.aes_kw)
1820 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1821 if (vcpu->kvm->arch.crypto.dea_kw)
1822 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1823
Tony Krowiak5102ee82014-06-27 14:46:01 -04001824 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1825}
1826
Dominik Dingelb31605c2014-03-25 13:47:11 +01001827void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1828{
1829 free_page(vcpu->arch.sie_block->cbrlo);
1830 vcpu->arch.sie_block->cbrlo = 0;
1831}
1832
/*
 * Allocate the CMMA block origin page and adjust ecb2 accordingly.
 * Returns 0 on success, -ENOMEM if the page cannot be allocated.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* NOTE(review): 0x80 presumably enables CMMA, 0x08 is cleared
	 * (PFMFI, set conditionally in kvm_arch_vcpu_setup) - confirm */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1843
Michael Mueller91520f12015-02-27 14:32:11 +01001844static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1845{
1846 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1847
Michael Mueller91520f12015-02-27 14:32:11 +01001848 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01001849 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001850 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01001851}
1852
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001853int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1854{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001855 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001856
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001857 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1858 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001859 CPUSTAT_STOPPED);
1860
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001861 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001862 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001863 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001864 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001865
Michael Mueller91520f12015-02-27 14:32:11 +01001866 kvm_s390_vcpu_setup_model(vcpu);
1867
David Hildenbrandbdab09f2016-04-12 11:07:49 +02001868 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1869 if (MACHINE_HAS_ESOP)
1870 vcpu->arch.sie_block->ecb |= 0x02;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01001871 if (test_kvm_facility(vcpu->kvm, 9))
1872 vcpu->arch.sie_block->ecb |= 0x04;
David Hildenbrandf597d242016-04-22 16:26:49 +02001873 if (test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001874 vcpu->arch.sie_block->ecb |= 0x10;
1875
David Hildenbrand873b4252016-04-04 15:53:47 +02001876 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrandd6af0b42016-03-04 11:55:56 +01001877 vcpu->arch.sie_block->ecb2 |= 0x08;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02001878 vcpu->arch.sie_block->eca = 0x1002000U;
1879 if (sclp.has_cei)
1880 vcpu->arch.sie_block->eca |= 0x80000000U;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02001881 if (sclp.has_ib)
1882 vcpu->arch.sie_block->eca |= 0x40000000U;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001883 if (sclp.has_siif)
Heiko Carstens217a4402013-12-30 12:54:14 +01001884 vcpu->arch.sie_block->eca |= 1;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001885 if (sclp.has_sigpif)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001886 vcpu->arch.sie_block->eca |= 0x10000000U;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001887 if (test_kvm_facility(vcpu->kvm, 64))
1888 vcpu->arch.sie_block->ecb3 |= 0x01;
Michael Mueller18280d82015-03-16 16:05:41 +01001889 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman13211ea2014-04-30 13:39:46 -04001890 vcpu->arch.sie_block->eca |= 0x00020000;
1891 vcpu->arch.sie_block->ecd |= 0x20000000;
1892 }
Fan Zhangc6e5f162016-01-07 18:24:29 +08001893 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Thomas Huth492d8642015-02-10 16:11:01 +01001894 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001895 if (test_kvm_facility(vcpu->kvm, 74))
1896 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001897
Dominik Dingele6db1d62015-05-07 15:41:57 +02001898 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01001899 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1900 if (rc)
1901 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001902 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001903 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001904 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001905
Tony Krowiak5102ee82014-06-27 14:46:01 -04001906 kvm_s390_vcpu_crypto_setup(vcpu);
1907
Dominik Dingelb31605c2014-03-25 13:47:11 +01001908 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001909}
1910
/*
 * Allocate and minimally initialize a vcpu and its SIE page. The SCA is
 * upgraded on demand via sca_can_add_vcpu() when the id does not fit the
 * basic SCA. Returns the vcpu or ERR_PTR(-EINVAL/-ENOMEM/...); cleanup
 * of partially constructed state is done via the goto chain.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
1960
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001961int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1962{
David Hildenbrand9a022062014-08-05 17:40:47 +02001963 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001964}
1965
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001966void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001967{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001968 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001969 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001970}
1971
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001972void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001973{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001974 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001975}
1976
Christian Borntraeger8e236542015-04-09 13:49:04 +02001977static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1978{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001979 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001980 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001981}
1982
/* the request was processed; clear the flag so SIE can be re-entered */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
1987
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001988/*
1989 * Kick a guest cpu out of SIE and wait until SIE is not running.
1990 * If the CPU is not running (e.g. waiting as idle) the function will
1991 * return immediately. */
1992void exit_sie(struct kvm_vcpu *vcpu)
1993{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001994 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001995 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1996 cpu_relax();
1997}
1998
Christian Borntraeger8e236542015-04-09 13:49:04 +02001999/* Kick a guest cpu out of SIE to process a request synchronously */
2000void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002001{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002002 kvm_make_request(req, vcpu);
2003 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002004}
2005
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002006static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2007 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002008{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002009 struct kvm *kvm = gmap->private;
2010 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002011 unsigned long prefix;
2012 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002013
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002014 if (gmap_is_shadow(gmap))
2015 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002016 if (start >= 1UL << 31)
2017 /* We are only interested in prefix pages */
2018 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002019 kvm_for_each_vcpu(i, vcpu, kvm) {
2020 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002021 prefix = kvm_s390_get_prefix(vcpu);
2022 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2023 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2024 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002025 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002026 }
2027 }
2028}
2029
/*
 * Architecture hook required by common KVM code; s390 uses its own
 * kick mechanism (exit_sie/kvm_s390_vcpu_request), so reaching this
 * function is a bug.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
2036
Carsten Otte14eebd92012-05-15 14:15:26 +02002037static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2038 struct kvm_one_reg *reg)
2039{
2040 int r = -EINVAL;
2041
2042 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002043 case KVM_REG_S390_TODPR:
2044 r = put_user(vcpu->arch.sie_block->todpr,
2045 (u32 __user *)reg->addr);
2046 break;
2047 case KVM_REG_S390_EPOCHDIFF:
2048 r = put_user(vcpu->arch.sie_block->epoch,
2049 (u64 __user *)reg->addr);
2050 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002051 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002052 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002053 (u64 __user *)reg->addr);
2054 break;
2055 case KVM_REG_S390_CLOCK_COMP:
2056 r = put_user(vcpu->arch.sie_block->ckc,
2057 (u64 __user *)reg->addr);
2058 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002059 case KVM_REG_S390_PFTOKEN:
2060 r = put_user(vcpu->arch.pfault_token,
2061 (u64 __user *)reg->addr);
2062 break;
2063 case KVM_REG_S390_PFCOMPARE:
2064 r = put_user(vcpu->arch.pfault_compare,
2065 (u64 __user *)reg->addr);
2066 break;
2067 case KVM_REG_S390_PFSELECT:
2068 r = put_user(vcpu->arch.pfault_select,
2069 (u64 __user *)reg->addr);
2070 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002071 case KVM_REG_S390_PP:
2072 r = put_user(vcpu->arch.sie_block->pp,
2073 (u64 __user *)reg->addr);
2074 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002075 case KVM_REG_S390_GBEA:
2076 r = put_user(vcpu->arch.sie_block->gbea,
2077 (u64 __user *)reg->addr);
2078 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002079 default:
2080 break;
2081 }
2082
2083 return r;
2084}
2085
2086static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2087 struct kvm_one_reg *reg)
2088{
2089 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002090 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002091
2092 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002093 case KVM_REG_S390_TODPR:
2094 r = get_user(vcpu->arch.sie_block->todpr,
2095 (u32 __user *)reg->addr);
2096 break;
2097 case KVM_REG_S390_EPOCHDIFF:
2098 r = get_user(vcpu->arch.sie_block->epoch,
2099 (u64 __user *)reg->addr);
2100 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002101 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002102 r = get_user(val, (u64 __user *)reg->addr);
2103 if (!r)
2104 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002105 break;
2106 case KVM_REG_S390_CLOCK_COMP:
2107 r = get_user(vcpu->arch.sie_block->ckc,
2108 (u64 __user *)reg->addr);
2109 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002110 case KVM_REG_S390_PFTOKEN:
2111 r = get_user(vcpu->arch.pfault_token,
2112 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002113 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2114 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002115 break;
2116 case KVM_REG_S390_PFCOMPARE:
2117 r = get_user(vcpu->arch.pfault_compare,
2118 (u64 __user *)reg->addr);
2119 break;
2120 case KVM_REG_S390_PFSELECT:
2121 r = get_user(vcpu->arch.pfault_select,
2122 (u64 __user *)reg->addr);
2123 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002124 case KVM_REG_S390_PP:
2125 r = get_user(vcpu->arch.sie_block->pp,
2126 (u64 __user *)reg->addr);
2127 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002128 case KVM_REG_S390_GBEA:
2129 r = get_user(vcpu->arch.sie_block->gbea,
2130 (u64 __user *)reg->addr);
2131 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002132 default:
2133 break;
2134 }
2135
2136 return r;
2137}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002138
/* KVM_S390_INITIAL_RESET ioctl: put the vcpu back into its initial state */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
2144
2145int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2146{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002147 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002148 return 0;
2149}
2150
2151int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2152{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002153 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002154 return 0;
2155}
2156
2157int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2158 struct kvm_sregs *sregs)
2159{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002160 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002161 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christian Borntraeger59674c12012-01-11 11:20:33 +01002162 restore_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002163 return 0;
2164}
2165
2166int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2167 struct kvm_sregs *sregs)
2168{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002169 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002170 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002171 return 0;
2172}
2173
/*
 * KVM_SET_FPU: install new guest floating point state into the current
 * thread's lazy FPU save area.  Returns -EINVAL for an invalid fpc.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	/* validate the floating point control word before touching state */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		/* with vector support the fprs are embedded in the vxrs */
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}
2187
/*
 * KVM_GET_FPU: read the guest floating point state out of the current
 * thread's lazy FPU save area.
 */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		/* with vector support the fprs are embedded in the vxrs */
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
2199
2200static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2201{
2202 int rc = 0;
2203
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002204 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002205 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002206 else {
2207 vcpu->run->psw_mask = psw.mask;
2208 vcpu->run->psw_addr = psw.addr;
2209 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002210 return rc;
2211}
2212
/* KVM_TRANSLATE is not supported on s390 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
2218
/* guest debug flags accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable or disable guest debugging.  Any previous
 * debug state is dropped first; on failure all debug state (including
 * the PER cpuflag) is cleared again.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate: drop old flags and breakpoint data */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	/* guest PER suppression is required for debugging support */
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoints failed: undo everything */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
2256
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002257int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2258 struct kvm_mp_state *mp_state)
2259{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002260 /* CHECK_STOP and LOAD are not supported yet */
2261 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2262 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002263}
2264
2265int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2266 struct kvm_mp_state *mp_state)
2267{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002268 int rc = 0;
2269
2270 /* user space knows about this interface - let it control the state */
2271 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2272
2273 switch (mp_state->mp_state) {
2274 case KVM_MP_STATE_STOPPED:
2275 kvm_s390_vcpu_stop(vcpu);
2276 break;
2277 case KVM_MP_STATE_OPERATING:
2278 kvm_s390_vcpu_start(vcpu);
2279 break;
2280 case KVM_MP_STATE_LOAD:
2281 case KVM_MP_STATE_CHECK_STOP:
2282 /* fall through - CHECK_STOP and LOAD are not supported yet */
2283 default:
2284 rc = -ENXIO;
2285 }
2286
2287 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002288}
2289
David Hildenbrand8ad35752014-03-14 11:00:21 +01002290static bool ibs_enabled(struct kvm_vcpu *vcpu)
2291{
2292 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2293}
2294
/*
 * Process all outstanding vcpu requests before (re-)entering SIE.
 * Restarts from the top after handling each request, so requests that
 * arrive while processing are not missed.  Returns 0 when no requests
 * remain, or a negative error code.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidating ihcpu forces a guest TLB flush on SIE entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
2346
/*
 * Set the guest TOD clock for the whole VM.  The epoch (delta against
 * the host TOD clock) is propagated to all vcpus while they are blocked
 * out of SIE, so every vcpu observes the new clock consistently.
 */
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	/* disable preemption so the epoch is computed against a stable TOD */
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
2362
Thomas Huthfa576c52014-05-06 17:20:16 +02002363/**
2364 * kvm_arch_fault_in_page - fault-in guest page if necessary
2365 * @vcpu: The corresponding virtual cpu
2366 * @gpa: Guest physical address
2367 * @writable: Whether the page should be writable or not
2368 *
2369 * Make sure that a guest page has been faulted-in on the host.
2370 *
2371 * Return: Zero on success, negative error code otherwise.
2372 */
2373long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002374{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002375 return gmap_fault(vcpu->arch.gmap, gpa,
2376 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002377}
2378
/*
 * Inject a pfault notification carrying @token into the guest:
 * a PFAULT_INIT interrupt on the vcpu when @start_token is set,
 * otherwise a floating PFAULT_DONE interrupt on the whole VM.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		/* injection should not fail here; warn once if it does */
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
2395
/* async-pf hook: a page is not present, tell the guest via PFAULT_INIT */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
2402
/* async-pf hook: the page became present, tell the guest via PFAULT_DONE */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
2409
/* async-pf hook: nothing to do here on s390 (see comment below) */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
2415
/* async-pf hook: always claim injectability so completions get cleaned up */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
2424
/*
 * Try to set up an asynchronous page fault for the current gmap fault
 * address.  Returns 0 when async handling is not possible/allowed (the
 * fault must then be handled synchronously), otherwise the result of
 * kvm_setup_async_pf.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handshake must have been enabled by the guest */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the guest-selected PSW bits must match the compare value */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* the guest must be able to take the notification interrupt */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* translate the faulting guest address to a host virtual address */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
2453
/*
 * Work to do before (re-)entering SIE: async-pf housekeeping, pending
 * machine checks, interrupt delivery, vcpu requests and guest debug
 * setup.  Returns 0 when the vcpu may enter SIE, nonzero otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* stash gprs 14/15 in the SIE block; they are clobbered by sie64a */
	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests get their interrupts delivered by user space */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
2496
/*
 * SIE faulted on a guest instruction fetch/access: inject an addressing
 * exception into the guest, forwarding the PSW by the faulting
 * instruction's length so the exception looks architecturally correct.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
2532
/*
 * Work to do after leaving SIE: restore debug state and gprs 14/15,
 * then translate the SIE exit into a return code - handle intercepts,
 * prepare a userspace exit, or resolve/report the host page fault.
 * Returns 0 to continue the run loop, a negative error, or -EREMOTE
 * when userspace involvement is required.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	/* gprs 14/15 were stashed in the SIE block by vcpu_pre_run */
	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		/* unhandled intercept: hand it to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol guests: userspace resolves the fault */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		/* try async pfault first, fall back to synchronous fault-in */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
2573
/*
 * Main vcpu run loop: repeatedly prepare the vcpu, enter SIE and handle
 * the exit, until an error occurs, a signal is pending or a guest debug
 * exit is requested.  Runs with kvm->srcu held except while in SIE.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2612
/*
 * Copy the register state userspace marked dirty in kvm_run into the
 * vcpu before entering the run loop, then clear the dirty mask.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token cancels any outstanding pfault work */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
2640
/*
 * Copy the vcpu register state back into kvm_run so userspace sees the
 * current values after the run loop returns.
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2656
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002657int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2658{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002659 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002660 sigset_t sigsaved;
2661
David Hildenbrand27291e22014-01-23 12:26:52 +01002662 if (guestdbg_exit_pending(vcpu)) {
2663 kvm_s390_prepare_debug_exit(vcpu);
2664 return 0;
2665 }
2666
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002667 if (vcpu->sigset_active)
2668 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2669
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002670 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2671 kvm_s390_vcpu_start(vcpu);
2672 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002673 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002674 vcpu->vcpu_id);
2675 return -EINVAL;
2676 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002677
David Hildenbrandb028ee32014-07-17 10:47:43 +02002678 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002679 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002680
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002681 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002682 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002683
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002684 if (signal_pending(current) && !rc) {
2685 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002686 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002687 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002688
David Hildenbrand27291e22014-01-23 12:26:52 +01002689 if (guestdbg_exit_pending(vcpu) && !rc) {
2690 kvm_s390_prepare_debug_exit(vcpu);
2691 rc = 0;
2692 }
2693
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002694 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002695 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002696 rc = 0;
2697 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002698
David Hildenbranddb0758b2016-02-15 09:42:25 +01002699 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002700 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002701
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002702 if (vcpu->sigset_active)
2703 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2704
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002705 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002706 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002707}
2708
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	/* archmode byte at absolute 163 flags a z/Arch-format save area */
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* write_guest_real: address is relative to the guest prefix */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		/*
		 * Caller passed the address of the FP register field; bias
		 * it back so the __LC_* offsets below can be added uniformly.
		 */
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	/* accumulate failures; any fault is reported as -EFAULT at the end */
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	/* clock comparator is stored shifted right by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2766
/*
 * Store the vCPU status while the vCPU context may still be loaded on
 * this host CPU.  Syncs the lazily-held register state back into the
 * kvm_run area first, then delegates to the "unloaded" variant.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2780
/*
 * store additional status at address
 * (vector registers; 512 bytes written to the 1K-aligned gpa)
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;	/* no (usable) address given -> nothing to do */

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
2794
/*
 * Store the additional (vector register) status for a loaded vCPU.
 * No-op unless the guest has the vector facility (STFLE bit 129).
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2812
/*
 * Request that IBS (interlock broadcast suppression) be disabled on this
 * vCPU.  Clears any not-yet-consumed ENABLE request first so the two
 * requests cannot race each other.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);	/* drop stale ENABLE */
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
2818
2819static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2820{
2821 unsigned int i;
2822 struct kvm_vcpu *vcpu;
2823
2824 kvm_for_each_vcpu(i, vcpu, kvm) {
2825 __disable_ibs_on_vcpu(vcpu);
2826 }
2827}
2828
/*
 * Request that IBS be enabled on this vCPU.  Skipped entirely when the
 * SCLP facility list says the machine has no IBS support.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);	/* drop stale DISABLE */
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
2836
/*
 * Transition a vCPU out of the STOPPED state.  Also manages the IBS
 * facility: it is only beneficial while exactly one vCPU is running.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;		/* already started - nothing to do */

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count the vCPUs that are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	/* clear STOPPED only after the IBS bookkeeping above is done */
	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2875
/*
 * Transition a vCPU into the STOPPED state.  If this leaves exactly one
 * running vCPU, re-enable IBS on that survivor.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;		/* already stopped - nothing to do */

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find out whether exactly one vCPU remains running, and which */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2913
Cornelia Huckd6712df2012-12-20 15:32:11 +01002914static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2915 struct kvm_enable_cap *cap)
2916{
2917 int r;
2918
2919 if (cap->flags)
2920 return -EINVAL;
2921
2922 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002923 case KVM_CAP_S390_CSS_SUPPORT:
2924 if (!vcpu->kvm->arch.css_support) {
2925 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002926 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002927 trace_kvm_s390_enable_css(vcpu->kvm);
2928 }
2929 r = 0;
2930 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002931 default:
2932 r = -EINVAL;
2933 break;
2934 }
2935 return r;
2936}
2937
/*
 * Handle the KVM_S390_MEM_OP vCPU ioctl: read from or write to guest
 * logical memory, or (with F_CHECK_ONLY) just test accessibility.
 * Returns 0 on success, a negative errno on failure, or a positive
 * value when a guest access exception occurred (optionally injected
 * into the guest when F_INJECT_EXCEPTION is set).
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* a bounce buffer is only needed when data is actually transferred */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* hold the memslots stable while translating/accessing guest memory */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 means a guest access exception is pending in vcpu->arch.pgm */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2998
/*
 * Dispatcher for all s390-specific per-vCPU ioctls.  Copies the argument
 * structure in from userspace where needed, validates it, and forwards to
 * the matching helper.  Returns 0 or a positive helper result on success,
 * a negative errno on failure, -ENOTTY for unknown ioctls.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* legacy interface: converted to a kvm_s390_irq first */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* srcu protects the guest memory accesses done while storing */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* map a user-controlled address space segment into the guest */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest-mapping fault at address 'arg' */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* buffer must hold a whole, non-zero number of irq entries */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
3153
/*
 * Page-fault handler for the vCPU file mapping.  Only user-controlled
 * (ucontrol) VMs may map anything here: the SIE control block page at
 * KVM_S390_SIE_PAGE_OFFSET.  Everything else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);	/* reference dropped by the VM layer */
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
3166
/* s390 keeps no per-memslot architecture data, so nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
3172
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003173/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003174int kvm_arch_prepare_memory_region(struct kvm *kvm,
3175 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003176 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003177 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003178{
Nick Wangdd2887e2013-03-25 17:22:57 +01003179 /* A few sanity checks. We can have memory slots which have to be
3180 located/ended at a segment boundary (1MB). The memory in userland is
3181 ok to be fragmented into various different vmas. It is okay to mmap()
3182 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003183
Carsten Otte598841c2011-07-24 10:48:21 +02003184 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003185 return -EINVAL;
3186
Carsten Otte598841c2011-07-24 10:48:21 +02003187 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003188 return -EINVAL;
3189
Dominik Dingela3a92c32014-12-01 17:24:42 +01003190 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3191 return -EINVAL;
3192
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003193 return 0;
3194}
3195
/*
 * Called after a memslot update has been committed; (re)establishes the
 * gmap segment mapping for the new slot contents when anything relevant
 * actually changed.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		/* void return - the slot is committed either way; just warn */
		pr_warn("failed to commit memory region\n");
	return;
}
3221
/*
 * Build a mask over facility-list word i from the SCLP hmfai field.
 * Each word gets a 2-bit indication (extracted below); the indication
 * selects how many leading 16-bit chunks of the word are kept.
 * NOTE(review): exact hmfai encoding is defined by the SCLP interface —
 * confirm against the SCLP documentation/struct definition.
 */
static inline unsigned long nonhyp_mask(int i)
{
	/* take bits (2*i, 2*i+1) from the top of hmfai */
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	/* shift the base mask right by 0/16/32/48 bits */
	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
3228
/* Reset the halt-polling wakeup marker when the vCPU leaves blocked state. */
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
3233
/*
 * Module init: bail out when the hardware cannot run guests (no SIEF2),
 * fold the host facility list into the KVM facility mask, then register
 * with the generic KVM core.
 */
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	/* expose only facilities the hypervisor does not have to emulate */
	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
3249
/* Module exit: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
3254
/* module entry and exit points */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");