/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
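/*
 * Each doubleword whitelists 64 facility bits (word 0 = facilities 0-63,
 * word 1 = facilities 64-127). Bits cleared here are never offered to a
 * guest; the mask is ANDed with the host facility list at VM creation.
 */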
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

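/*
 * Probe a single PERFORM LOCKED OPERATION (PLO) function code: bit 0x100
 * in GR0 turns the call into a "test bit" query, so cc == 0 indicates the
 * subfunction is available without actually executing it.
 */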
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

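/*
 * Build the host capability lists once at module init: probe all 256 PLO
 * subfunctions, the PTFF query (TOD-clock steering) and the CPACF query
 * functions for the installed MSA levels, then record the CPU features
 * (currently only ESOP) that KVM can offer to guests.
 */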
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

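	/*
	 * Refresh each existing VCPU's crypto setup and force it out of SIE
	 * so that the new (or cleared) wrapping key masks take effect.
	 */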
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

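	/* try a physically contiguous buffer first, fall back to vmalloc */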
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

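	/* STHYI is emulated by KVM, so facility 74 can always be offered */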
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01001425
1426 if (kvm_is_ucontrol(vcpu->kvm))
1427 gmap_free(vcpu->arch.gmap);
1428
Dominik Dingele6db1d62015-05-07 15:41:57 +02001429 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001430 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001431 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001432
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001433 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001434 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001435}
1436
1437static void kvm_free_vcpus(struct kvm *kvm)
1438{
1439 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001440 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001441
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001442 kvm_for_each_vcpu(i, vcpu, kvm)
1443 kvm_arch_vcpu_destroy(vcpu);
1444
1445 mutex_lock(&kvm->lock);
1446 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1447 kvm->vcpus[i] = NULL;
1448
1449 atomic_set(&kvm->online_vcpus, 0);
1450 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001451}
1452
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001453void kvm_arch_destroy_vm(struct kvm *kvm)
1454{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001455 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001456 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001457 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001458 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01001459 if (!kvm_is_ucontrol(kvm))
1460 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001461 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001462 kvm_s390_clear_float_irqs(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001463 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001464}
1465
1466/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001467static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1468{
1469 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1470 if (!vcpu->arch.gmap)
1471 return -ENOMEM;
1472 vcpu->arch.gmap->private = vcpu->kvm;
1473
1474 return 0;
1475}
1476
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001477static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1478{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001479 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001480 if (vcpu->kvm->arch.use_esca) {
1481 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001482
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001483 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001484 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001485 } else {
1486 struct bsca_block *sca = vcpu->kvm->arch.sca;
1487
1488 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001489 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001490 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001491 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001492}
1493
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001494static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001495{
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001496 read_lock(&vcpu->kvm->arch.sca_lock);
1497 if (vcpu->kvm->arch.use_esca) {
1498 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001499
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001500 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001501 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1502 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand25508822015-10-12 16:27:23 +02001503 vcpu->arch.sie_block->ecb2 |= 0x04U;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001504 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001505 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001506 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001507
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001508 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001509 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1510 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001511 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001512 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001513 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001514}
1515
1516/* Basic SCA to Extended SCA data copy routines */
1517static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1518{
1519 d->sda = s->sda;
1520 d->sigp_ctrl.c = s->sigp_ctrl.c;
1521 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1522}
1523
1524static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1525{
1526 int i;
1527
1528 d->ipte_control = s->ipte_control;
1529 d->mcn[0] = s->mcn;
1530 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1531 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1532}
1533
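/*
 * Replace the basic SCA by an extended SCA so that more VCPUs can be added:
 * allocate the new block, keep all VCPUs out of SIE, copy the entries under
 * the write lock, point every SIE control block at the new SCA (ecb2 bit
 * 0x04 selects the extended format) and free the old block.
 */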
1534static int sca_switch_to_extended(struct kvm *kvm)
1535{
1536 struct bsca_block *old_sca = kvm->arch.sca;
1537 struct esca_block *new_sca;
1538 struct kvm_vcpu *vcpu;
1539 unsigned int vcpu_idx;
1540 u32 scaol, scaoh;
1541
1542 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1543 if (!new_sca)
1544 return -ENOMEM;
1545
1546 scaoh = (u32)((u64)(new_sca) >> 32);
1547 scaol = (u32)(u64)(new_sca) & ~0x3fU;
1548
1549 kvm_s390_vcpu_block_all(kvm);
1550 write_lock(&kvm->arch.sca_lock);
1551
1552 sca_copy_b_to_e(new_sca, old_sca);
1553
1554 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1555 vcpu->arch.sie_block->scaoh = scaoh;
1556 vcpu->arch.sie_block->scaol = scaol;
1557 vcpu->arch.sie_block->ecb2 |= 0x04U;
1558 }
1559 kvm->arch.sca = new_sca;
1560 kvm->arch.use_esca = 1;
1561
1562 write_unlock(&kvm->arch.sca_lock);
1563 kvm_s390_vcpu_unblock_all(kvm);
1564
1565 free_page((unsigned long)old_sca);
1566
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001567 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1568 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001569 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001570}
1571
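/*
 * VCPU ids below the basic SCA limit always fit; higher ids require the
 * extended SCA, so switch to it on demand if the hardware supports it.
 */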
1572static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1573{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001574 int rc;
1575
1576 if (id < KVM_S390_BSCA_CPU_SLOTS)
1577 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001578 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001579 return false;
1580
1581 mutex_lock(&kvm->lock);
1582 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1583 mutex_unlock(&kvm->lock);
1584
1585 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001586}
1587
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001588int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1589{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001590 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1591 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001592 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1593 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001594 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001595 KVM_SYNC_CRS |
1596 KVM_SYNC_ARCH0 |
1597 KVM_SYNC_PFAULT;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001598 if (test_kvm_facility(vcpu->kvm, 64))
1599 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01001600 /* fprs can be synchronized via vrs, even if the guest has no vx. With
1601 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1602 */
1603 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04001604 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001605 else
1606 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001607
1608 if (kvm_is_ucontrol(vcpu->kvm))
1609 return __kvm_ucontrol_vcpu_init(vcpu);
1610
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001611 return 0;
1612}
1613
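/*
 * The s390 CPU timer counts down.  While the VCPU thread runs host code on
 * behalf of the guest (loaded, but not inside SIE), that time is charged to
 * the guest lazily: cputm_start records the TOD clock when accounting
 * started and the elapsed delta is subtracted when accounting stops or when
 * the timer is read, with cputm_seqcount keeping readers consistent.
 */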
David Hildenbranddb0758b2016-02-15 09:42:25 +01001614/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1615static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1616{
1617 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01001618 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001619 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01001620 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001621}
1622
1623/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1624static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1625{
1626 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01001627 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001628 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1629 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001630 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001631}
1632
1633/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1634static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1635{
1636 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1637 vcpu->arch.cputm_enabled = true;
1638 __start_cpu_timer_accounting(vcpu);
1639}
1640
1641/* must be called with preemption disabled to protect against TOD sync and vcpu_load/put */
1642static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1643{
1644 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1645 __stop_cpu_timer_accounting(vcpu);
1646 vcpu->arch.cputm_enabled = false;
1647}
1648
1649static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1650{
1651 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1652 __enable_cpu_timer_accounting(vcpu);
1653 preempt_enable();
1654}
1655
1656static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1657{
1658 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1659 __disable_cpu_timer_accounting(vcpu);
1660 preempt_enable();
1661}
1662
David Hildenbrand4287f242016-02-15 09:40:12 +01001663/* set the cpu timer - may only be called from the VCPU thread itself */
1664void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
1665{
David Hildenbranddb0758b2016-02-15 09:42:25 +01001666 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01001667 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001668 if (vcpu->arch.cputm_enabled)
1669 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01001670 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001671 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001672 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01001673}
1674
David Hildenbranddb0758b2016-02-15 09:42:25 +01001675/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01001676__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
1677{
David Hildenbrand9c23a132016-02-17 21:53:33 +01001678 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001679 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001680
1681 if (unlikely(!vcpu->arch.cputm_enabled))
1682 return vcpu->arch.sie_block->cputm;
1683
David Hildenbrand9c23a132016-02-17 21:53:33 +01001684 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
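	/*
	 * raw_read_seqcount() may return an odd (update in progress) value;
	 * masking off the low bit in the retry check below forces a retry in
	 * that case, so a value sampled during an update is never used.
	 */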
1685 do {
1686 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
1687 /*
1688 * If the writer would ever execute a read in the critical
1689 * section, e.g. in irq context, we have a deadlock.
1690 */
1691 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1692 value = vcpu->arch.sie_block->cputm;
1693 /* if cputm_start is 0, accounting is being started/stopped */
1694 if (likely(vcpu->arch.cputm_start))
1695 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1696 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
1697 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01001698 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01001699}
1700
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001701void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1702{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001703 /* Save host register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001704 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001705 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
1706 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001707
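	/*
	 * Point the lazy FPU machinery at the guest's register save area so
	 * that a later save_fpu_regs() stores the guest FP/VX state directly
	 * into vcpu->run; the host state saved above is restored again in
	 * kvm_arch_vcpu_put().
	 */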
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001708 if (MACHINE_HAS_VX)
1709 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
1710 else
1711 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001712 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001713 if (test_fp_ctl(current->thread.fpu.fpc))
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001714 /* User space provided an invalid FPC, let's clear it */
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001715 current->thread.fpu.fpc = 0;
1716
1717 save_access_regs(vcpu->arch.host_acrs);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001718 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001719 gmap_enable(vcpu->arch.gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001720 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01001721 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01001722 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01001723 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001724}
1725
1726void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1727{
David Hildenbrand01a745a2016-02-12 20:41:56 +01001728 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01001729 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01001730 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001731 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001732 gmap_disable(vcpu->arch.gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001733
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001734 /* Save guest register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001735 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001736 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001737
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001738 /* Restore host register state */
1739 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
1740 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001741
1742 save_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001743 restore_access_regs(vcpu->arch.host_acrs);
1744}
1745
1746static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1747{
1748	/* this equals an initial cpu reset as in the POP (Principles of Operation), but we don't switch to ESA */
1749 vcpu->arch.sie_block->gpsw.mask = 0UL;
1750 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01001751 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01001752 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001753 vcpu->arch.sie_block->ckc = 0UL;
1754 vcpu->arch.sie_block->todpr = 0;
1755 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1756 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1757 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001758 /* make sure the new fpc will be lazily loaded */
1759 save_fpu_regs();
1760 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001761 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001762 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001763 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1764 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001765 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1766 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001767 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001768}
1769
Dominik Dingel31928aa2014-12-04 15:47:07 +01001770void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001771{
Jason J. Herne72f25022014-11-25 09:46:02 -05001772 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02001773 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001774 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02001775 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001776 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02001777 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01001778 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001779 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02001780 }
1781
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001782}
1783
Tony Krowiak5102ee82014-06-27 14:46:01 -04001784static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1785{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001786 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001787 return;
1788
Tony Krowiaka374e892014-09-03 10:13:53 +02001789 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1790
1791 if (vcpu->kvm->arch.crypto.aes_kw)
1792 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1793 if (vcpu->kvm->arch.crypto.dea_kw)
1794 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1795
Tony Krowiak5102ee82014-06-27 14:46:01 -04001796 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1797}
1798
Dominik Dingelb31605c2014-03-25 13:47:11 +01001799void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1800{
1801 free_page(vcpu->arch.sie_block->cbrlo);
1802 vcpu->arch.sie_block->cbrlo = 0;
1803}
1804
1805int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1806{
1807 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1808 if (!vcpu->arch.sie_block->cbrlo)
1809 return -ENOMEM;
1810
1811 vcpu->arch.sie_block->ecb2 |= 0x80;
1812 vcpu->arch.sie_block->ecb2 &= ~0x08;
1813 return 0;
1814}
1815
Michael Mueller91520f12015-02-27 14:32:11 +01001816static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1817{
1818 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1819
Michael Mueller91520f12015-02-27 14:32:11 +01001820 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01001821 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001822 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01001823}
1824
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001825int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1826{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001827 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001828
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001829 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1830 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001831 CPUSTAT_STOPPED);
1832
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001833 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001834 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001835 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001836 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001837
Michael Mueller91520f12015-02-27 14:32:11 +01001838 kvm_s390_vcpu_setup_model(vcpu);
1839
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01001840 vcpu->arch.sie_block->ecb = 0x02;
1841 if (test_kvm_facility(vcpu->kvm, 9))
1842 vcpu->arch.sie_block->ecb |= 0x04;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001843 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001844 vcpu->arch.sie_block->ecb |= 0x10;
1845
David Hildenbrandd6af0b42016-03-04 11:55:56 +01001846 if (test_kvm_facility(vcpu->kvm, 8))
1847 vcpu->arch.sie_block->ecb2 |= 0x08;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001848 vcpu->arch.sie_block->eca = 0xC1002000U;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001849 if (sclp.has_siif)
Heiko Carstens217a4402013-12-30 12:54:14 +01001850 vcpu->arch.sie_block->eca |= 1;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001851 if (sclp.has_sigpif)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001852 vcpu->arch.sie_block->eca |= 0x10000000U;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001853 if (test_kvm_facility(vcpu->kvm, 64))
1854 vcpu->arch.sie_block->ecb3 |= 0x01;
Michael Mueller18280d82015-03-16 16:05:41 +01001855 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman13211ea2014-04-30 13:39:46 -04001856 vcpu->arch.sie_block->eca |= 0x00020000;
1857 vcpu->arch.sie_block->ecd |= 0x20000000;
1858 }
Fan Zhangc6e5f162016-01-07 18:24:29 +08001859 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Thomas Huth492d8642015-02-10 16:11:01 +01001860 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001861 if (test_kvm_facility(vcpu->kvm, 74))
1862 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001863
Dominik Dingele6db1d62015-05-07 15:41:57 +02001864 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01001865 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1866 if (rc)
1867 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001868 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001869 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001870 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001871
Tony Krowiak5102ee82014-06-27 14:46:01 -04001872 kvm_s390_vcpu_crypto_setup(vcpu);
1873
Dominik Dingelb31605c2014-03-25 13:47:11 +01001874 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001875}
1876
1877struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1878 unsigned int id)
1879{
Carsten Otte4d475552011-10-18 12:27:12 +02001880 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001881 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001882 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001883
David Hildenbrand42158252015-10-12 12:57:22 +02001884 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02001885 goto out;
1886
1887 rc = -ENOMEM;
1888
Michael Muellerb110fea2013-06-12 13:54:54 +02001889 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001890 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001891 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001892
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001893 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1894 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001895 goto out_free_cpu;
1896
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001897 vcpu->arch.sie_block = &sie_page->sie_block;
1898 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1899
David Hildenbrandefed1102015-04-16 12:32:41 +02001900 /* the real guest size will always be smaller than msl */
1901 vcpu->arch.sie_block->mso = 0;
1902 vcpu->arch.sie_block->msl = sclp.hamax;
1903
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001904 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001905 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001906 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001907 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001908 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001909 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001910
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001911 rc = kvm_vcpu_init(vcpu, kvm, id);
1912 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001913 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001914 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001915 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001916 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001917
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001918 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001919out_free_sie_block:
1920 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001921out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001922 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001923out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001924 return ERR_PTR(rc);
1925}
1926
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001927int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1928{
David Hildenbrand9a022062014-08-05 17:40:47 +02001929 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001930}
1931
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001932void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001933{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001934 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001935 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001936}
1937
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001938void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001939{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001940 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001941}
1942
Christian Borntraeger8e236542015-04-09 13:49:04 +02001943static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1944{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001945 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001946 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001947}
1948
1949static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1950{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04001951 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001952}
1953
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001954/*
1955 * Kick a guest cpu out of SIE and wait until SIE is not running.
1956 * If the CPU is not running (e.g. waiting as idle) the function will
1957 * return immediately. */
1958void exit_sie(struct kvm_vcpu *vcpu)
1959{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001960 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001961 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1962 cpu_relax();
1963}
1964
Christian Borntraeger8e236542015-04-09 13:49:04 +02001965/* Kick a guest cpu out of SIE to process a request synchronously */
1966void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001967{
Christian Borntraeger8e236542015-04-09 13:49:04 +02001968 kvm_make_request(req, vcpu);
1969 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001970}
1971
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001972static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1973{
1974 int i;
1975 struct kvm *kvm = gmap->private;
1976 struct kvm_vcpu *vcpu;
1977
1978 kvm_for_each_vcpu(i, vcpu, kvm) {
1979		/* the prefix area spans two pages; clearing bit 2^12 lets one compare match either page */
Michael Muellerfda902c2014-05-13 16:58:30 +02001980 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001981 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001982 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001983 }
1984 }
1985}
1986
Christoffer Dallb6d33832012-03-08 16:44:24 -05001987int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1988{
1989 /* kvm common code refers to this, but never calls it */
1990 BUG();
1991 return 0;
1992}
1993
Carsten Otte14eebd92012-05-15 14:15:26 +02001994static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1995 struct kvm_one_reg *reg)
1996{
1997 int r = -EINVAL;
1998
1999 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002000 case KVM_REG_S390_TODPR:
2001 r = put_user(vcpu->arch.sie_block->todpr,
2002 (u32 __user *)reg->addr);
2003 break;
2004 case KVM_REG_S390_EPOCHDIFF:
2005 r = put_user(vcpu->arch.sie_block->epoch,
2006 (u64 __user *)reg->addr);
2007 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002008 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002009 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002010 (u64 __user *)reg->addr);
2011 break;
2012 case KVM_REG_S390_CLOCK_COMP:
2013 r = put_user(vcpu->arch.sie_block->ckc,
2014 (u64 __user *)reg->addr);
2015 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002016 case KVM_REG_S390_PFTOKEN:
2017 r = put_user(vcpu->arch.pfault_token,
2018 (u64 __user *)reg->addr);
2019 break;
2020 case KVM_REG_S390_PFCOMPARE:
2021 r = put_user(vcpu->arch.pfault_compare,
2022 (u64 __user *)reg->addr);
2023 break;
2024 case KVM_REG_S390_PFSELECT:
2025 r = put_user(vcpu->arch.pfault_select,
2026 (u64 __user *)reg->addr);
2027 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002028 case KVM_REG_S390_PP:
2029 r = put_user(vcpu->arch.sie_block->pp,
2030 (u64 __user *)reg->addr);
2031 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002032 case KVM_REG_S390_GBEA:
2033 r = put_user(vcpu->arch.sie_block->gbea,
2034 (u64 __user *)reg->addr);
2035 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002036 default:
2037 break;
2038 }
2039
2040 return r;
2041}
2042
2043static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2044 struct kvm_one_reg *reg)
2045{
2046 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002047 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002048
2049 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002050 case KVM_REG_S390_TODPR:
2051 r = get_user(vcpu->arch.sie_block->todpr,
2052 (u32 __user *)reg->addr);
2053 break;
2054 case KVM_REG_S390_EPOCHDIFF:
2055 r = get_user(vcpu->arch.sie_block->epoch,
2056 (u64 __user *)reg->addr);
2057 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002058 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002059 r = get_user(val, (u64 __user *)reg->addr);
2060 if (!r)
2061 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002062 break;
2063 case KVM_REG_S390_CLOCK_COMP:
2064 r = get_user(vcpu->arch.sie_block->ckc,
2065 (u64 __user *)reg->addr);
2066 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002067 case KVM_REG_S390_PFTOKEN:
2068 r = get_user(vcpu->arch.pfault_token,
2069 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002070 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2071 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002072 break;
2073 case KVM_REG_S390_PFCOMPARE:
2074 r = get_user(vcpu->arch.pfault_compare,
2075 (u64 __user *)reg->addr);
2076 break;
2077 case KVM_REG_S390_PFSELECT:
2078 r = get_user(vcpu->arch.pfault_select,
2079 (u64 __user *)reg->addr);
2080 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002081 case KVM_REG_S390_PP:
2082 r = get_user(vcpu->arch.sie_block->pp,
2083 (u64 __user *)reg->addr);
2084 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002085 case KVM_REG_S390_GBEA:
2086 r = get_user(vcpu->arch.sie_block->gbea,
2087 (u64 __user *)reg->addr);
2088 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002089 default:
2090 break;
2091 }
2092
2093 return r;
2094}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002095
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002096static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2097{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002098 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002099 return 0;
2100}
2101
2102int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2103{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002104 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002105 return 0;
2106}
2107
2108int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2109{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002110 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002111 return 0;
2112}
2113
2114int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2115 struct kvm_sregs *sregs)
2116{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002117 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002118 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christian Borntraeger59674c12012-01-11 11:20:33 +01002119 restore_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002120 return 0;
2121}
2122
2123int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2124 struct kvm_sregs *sregs)
2125{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002126 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002127 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002128 return 0;
2129}
2130
2131int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2132{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002133 /* make sure the new values will be lazily loaded */
2134 save_fpu_regs();
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002135 if (test_fp_ctl(fpu->fpc))
2136 return -EINVAL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002137 current->thread.fpu.fpc = fpu->fpc;
2138 if (MACHINE_HAS_VX)
2139 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2140 else
2141 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002142 return 0;
2143}
2144
2145int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2146{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002147 /* make sure we have the latest values */
2148 save_fpu_regs();
2149 if (MACHINE_HAS_VX)
2150 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2151 else
2152 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2153 fpu->fpc = current->thread.fpu.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002154 return 0;
2155}
2156
2157static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2158{
2159 int rc = 0;
2160
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002161 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002162 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002163 else {
2164 vcpu->run->psw_mask = psw.mask;
2165 vcpu->run->psw_addr = psw.addr;
2166 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002167 return rc;
2168}
2169
2170int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2171 struct kvm_translation *tr)
2172{
2173 return -EINVAL; /* not implemented yet */
2174}
2175
David Hildenbrand27291e22014-01-23 12:26:52 +01002176#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2177 KVM_GUESTDBG_USE_HW_BP | \
2178 KVM_GUESTDBG_ENABLE)
2179
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002180int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2181 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002182{
David Hildenbrand27291e22014-01-23 12:26:52 +01002183 int rc = 0;
2184
2185 vcpu->guest_debug = 0;
2186 kvm_s390_clear_bp_data(vcpu);
2187
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002188 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002189 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002190 if (!sclp.has_gpere)
2191 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002192
2193 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2194 vcpu->guest_debug = dbg->control;
2195 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002196 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002197
2198 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2199 rc = kvm_s390_import_bp_data(vcpu, dbg);
2200 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002201 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002202 vcpu->arch.guestdbg.last_bp = 0;
2203 }
2204
2205 if (rc) {
2206 vcpu->guest_debug = 0;
2207 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002208 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002209 }
2210
2211 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002212}
2213
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002214int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2215 struct kvm_mp_state *mp_state)
2216{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002217 /* CHECK_STOP and LOAD are not supported yet */
2218 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2219 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002220}
2221
2222int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2223 struct kvm_mp_state *mp_state)
2224{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002225 int rc = 0;
2226
2227 /* user space knows about this interface - let it control the state */
2228 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2229
2230 switch (mp_state->mp_state) {
2231 case KVM_MP_STATE_STOPPED:
2232 kvm_s390_vcpu_stop(vcpu);
2233 break;
2234 case KVM_MP_STATE_OPERATING:
2235 kvm_s390_vcpu_start(vcpu);
2236 break;
2237 case KVM_MP_STATE_LOAD:
2238 case KVM_MP_STATE_CHECK_STOP:
2239 /* fall through - CHECK_STOP and LOAD are not supported yet */
2240 default:
2241 rc = -ENXIO;
2242 }
2243
2244 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002245}
2246
David Hildenbrand8ad35752014-03-14 11:00:21 +01002247static bool ibs_enabled(struct kvm_vcpu *vcpu)
2248{
2249 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2250}
2251
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002252static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2253{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002254retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002255 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002256 if (!vcpu->requests)
2257 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002258 /*
2259 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2260 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2261 * This ensures that the ipte instruction for this request has
2262 * already finished. We might race against a second unmapper that
2263 * wants to set the blocking bit. Lets just retry the request loop.
2264 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002265 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002266 int rc;
2267 rc = gmap_ipte_notify(vcpu->arch.gmap,
Michael Muellerfda902c2014-05-13 16:58:30 +02002268 kvm_s390_get_prefix(vcpu),
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002269 PAGE_SIZE * 2);
2270 if (rc)
2271 return rc;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002272 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002273 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002274
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002275 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2276 vcpu->arch.sie_block->ihcpu = 0xffff;
2277 goto retry;
2278 }
2279
David Hildenbrand8ad35752014-03-14 11:00:21 +01002280 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2281 if (!ibs_enabled(vcpu)) {
2282 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002283 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002284 &vcpu->arch.sie_block->cpuflags);
2285 }
2286 goto retry;
2287 }
2288
2289 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2290 if (ibs_enabled(vcpu)) {
2291 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002292 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002293 &vcpu->arch.sie_block->cpuflags);
2294 }
2295 goto retry;
2296 }
2297
David Hildenbrand0759d062014-05-13 16:54:32 +02002298 /* nothing to do, just clear the request */
2299 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2300
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002301 return 0;
2302}
2303
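/*
 * The guest's TOD clock is the host TOD clock plus the per-VM epoch.  Store
 * the epoch that yields the requested guest TOD and propagate it to all
 * VCPUs while they are kept out of SIE, so they pick it up consistently.
 */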
David Hildenbrand25ed1672015-05-12 09:49:14 +02002304void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2305{
2306 struct kvm_vcpu *vcpu;
2307 int i;
2308
2309 mutex_lock(&kvm->lock);
2310 preempt_disable();
2311 kvm->arch.epoch = tod - get_tod_clock();
2312 kvm_s390_vcpu_block_all(kvm);
2313 kvm_for_each_vcpu(i, vcpu, kvm)
2314 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2315 kvm_s390_vcpu_unblock_all(kvm);
2316 preempt_enable();
2317 mutex_unlock(&kvm->lock);
2318}
2319
Thomas Huthfa576c52014-05-06 17:20:16 +02002320/**
2321 * kvm_arch_fault_in_page - fault-in guest page if necessary
2322 * @vcpu: The corresponding virtual cpu
2323 * @gpa: Guest physical address
2324 * @writable: Whether the page should be writable or not
2325 *
2326 * Make sure that a guest page has been faulted-in on the host.
2327 *
2328 * Return: Zero on success, negative error code otherwise.
2329 */
2330long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002331{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002332 return gmap_fault(vcpu->arch.gmap, gpa,
2333 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002334}
2335
Dominik Dingel3c038e62013-10-07 17:11:48 +02002336static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2337 unsigned long token)
2338{
2339 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002340 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002341
2342 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002343 irq.u.ext.ext_params2 = token;
2344 irq.type = KVM_S390_INT_PFAULT_INIT;
2345 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002346 } else {
2347 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002348 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002349 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2350 }
2351}
2352
2353void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2354 struct kvm_async_pf *work)
2355{
2356 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2357 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2358}
2359
2360void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2361 struct kvm_async_pf *work)
2362{
2363 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2364 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2365}
2366
2367void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2368 struct kvm_async_pf *work)
2369{
2370 /* s390 will always inject the page directly */
2371}
2372
2373bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2374{
2375 /*
2376 * s390 will always inject the page directly,
2377	 * but we still want check_async_completion to clean up
2378 */
2379 return true;
2380}
2381
2382static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2383{
2384 hva_t hva;
2385 struct kvm_arch_async_pf arch;
2386 int rc;
2387
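	/*
	 * Only hand this fault to the async pfault machinery if the guest has
	 * pfault armed (a valid token), the PSW matches the guest's pfault
	 * mask/compare, external interrupts and the relevant CR0 subclass are
	 * enabled, no interrupt is already pending and pfault is enabled for
	 * the gmap.
	 */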
2388 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2389 return 0;
2390 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2391 vcpu->arch.pfault_compare)
2392 return 0;
2393 if (psw_extint_disabled(vcpu))
2394 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002395 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002396 return 0;
2397 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2398 return 0;
2399 if (!vcpu->arch.gmap->pfault_enabled)
2400 return 0;
2401
Heiko Carstens81480cc2014-01-01 16:36:07 +01002402 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2403 hva += current->thread.gmap_addr & ~PAGE_MASK;
2404 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002405 return 0;
2406
2407 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2408 return rc;
2409}
2410
Thomas Huth3fb4c402013-09-12 10:33:43 +02002411static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002412{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002413 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002414
Dominik Dingel3c038e62013-10-07 17:11:48 +02002415 /*
2416 * On s390 notifications for arriving pages will be delivered directly
2417	 * to the guest but the housekeeping for completed pfaults is
2418 * handled outside the worker.
2419 */
2420 kvm_check_async_pf_completion(vcpu);
2421
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002422 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2423 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002424
2425 if (need_resched())
2426 schedule();
2427
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002428 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002429 s390_handle_mcck();
2430
Jens Freimann79395032014-04-17 10:10:30 +02002431 if (!kvm_is_ucontrol(vcpu->kvm)) {
2432 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2433 if (rc)
2434 return rc;
2435 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002436
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002437 rc = kvm_s390_handle_requests(vcpu);
2438 if (rc)
2439 return rc;
2440
David Hildenbrand27291e22014-01-23 12:26:52 +01002441 if (guestdbg_enabled(vcpu)) {
2442 kvm_s390_backup_guest_per_regs(vcpu);
2443 kvm_s390_patch_guest_per_regs(vcpu);
2444 }
2445
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002446 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002447 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2448 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2449 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002450
Thomas Huth3fb4c402013-09-12 10:33:43 +02002451 return 0;
2452}
2453
Thomas Huth492d8642015-02-10 16:11:01 +01002454static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2455{
David Hildenbrand56317922016-01-12 17:37:58 +01002456 struct kvm_s390_pgm_info pgm_info = {
2457 .code = PGM_ADDRESSING,
2458 };
2459 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002460 int rc;
2461
2462 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2463 trace_kvm_s390_sie_fault(vcpu);
2464
2465 /*
2466 * We want to inject an addressing exception, which is defined as a
2467 * suppressing or terminating exception. However, since we came here
2468 * by a DAT access exception, the PSW still points to the faulting
2469 * instruction since DAT exceptions are nullifying. So we've got
2470 * to look up the current opcode to get the length of the instruction
2471 * to be able to forward the PSW.
2472 */
David Hildenbrand65977322015-11-16 16:17:45 +01002473 rc = read_guest_instr(vcpu, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002474 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002475 if (rc < 0) {
2476 return rc;
2477 } else if (rc) {
2478 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2479 * Forward by arbitrary ilc, injection will take care of
2480 * nullification if necessary.
2481 */
2482 pgm_info = vcpu->arch.pgm;
2483 ilen = 4;
2484 }
David Hildenbrand56317922016-01-12 17:37:58 +01002485 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2486 kvm_s390_forward_psw(vcpu, ilen);
2487 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002488}
2489
Thomas Huth3fb4c402013-09-12 10:33:43 +02002490static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2491{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002492 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2493 vcpu->arch.sie_block->icptcode);
2494 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2495
David Hildenbrand27291e22014-01-23 12:26:52 +01002496 if (guestdbg_enabled(vcpu))
2497 kvm_s390_restore_guest_per_regs(vcpu);
2498
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002499 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2500 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002501
2502 if (vcpu->arch.sie_block->icptcode > 0) {
2503 int rc = kvm_handle_sie_intercept(vcpu);
2504
2505 if (rc != -EOPNOTSUPP)
2506 return rc;
2507 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2508 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2509 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2510 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2511 return -EREMOTE;
2512 } else if (exit_reason != -EFAULT) {
2513 vcpu->stat.exit_null++;
2514 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002515 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2516 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2517 vcpu->run->s390_ucontrol.trans_exc_code =
2518 current->thread.gmap_addr;
2519 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002520 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002521 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002522 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002523 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002524 if (kvm_arch_setup_async_pf(vcpu))
2525 return 0;
2526 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002527 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002528 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002529}
2530
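/*
 * The main run loop: vcpu_pre_run / sie64a / vcpu_post_run until a
 * handler reports an error or a userspace exit, a signal is pending,
 * or a guest debug exit is requested.  kvm->srcu is held outside of
 * SIE so that memslots stay protected, and interrupts are disabled
 * around guest entry/exit for CPU timer accounting and PF_VCPU.
 */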
2531static int __vcpu_run(struct kvm_vcpu *vcpu)
2532{
2533 int rc, exit_reason;
2534
Thomas Huth800c1062013-09-12 10:33:45 +02002535 /*
2536 * We try to hold kvm->srcu during most of vcpu_run (except when
2537 * running the guest), so that memslots (and other stuff) are protected
2538 */
2539 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2540
Thomas Hutha76ccff2013-09-12 10:33:44 +02002541 do {
2542 rc = vcpu_pre_run(vcpu);
2543 if (rc)
2544 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002545
Thomas Huth800c1062013-09-12 10:33:45 +02002546 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002547 /*
2548 * As PF_VCPU will be used in the fault handler, there must be
2549 * no uaccess between guest_enter and guest_exit.
2550 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002551 local_irq_disable();
2552 __kvm_guest_enter();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002553 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002554 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002555 exit_reason = sie64a(vcpu->arch.sie_block,
2556 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002557 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002558 __enable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002559 __kvm_guest_exit();
2560 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002561 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002562
Thomas Hutha76ccff2013-09-12 10:33:44 +02002563 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002564 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002565
Thomas Huth800c1062013-09-12 10:33:45 +02002566 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002567 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002568}
2569
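/*
 * Copy the PSW and any register state flagged dirty in kvm_run
 * (prefix, control registers, timers, pfault parameters) into the
 * SIE control block and vcpu state before entering the guest.
 */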
David Hildenbrandb028ee32014-07-17 10:47:43 +02002570static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2571{
2572 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2573 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2574 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2575 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2576 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2577 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002578 /* some control register changes require a tlb flush */
2579 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002580 }
2581 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002582 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002583 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2584 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2585 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2586 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2587 }
2588 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2589 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2590 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2591 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002592 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2593 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002594 }
2595 kvm_run->kvm_dirty_regs = 0;
2596}
2597
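/*
 * Mirror the current PSW, prefix, control registers, timers and pfault
 * parameters back into kvm_run so userspace sees the state after the run.
 */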
2598static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2599{
2600 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2601 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2602 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2603 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01002604 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002605 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2606 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2607 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2608 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2609 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2610 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2611 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2612}
2613
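/*
 * The KVM_RUN ioctl handler: auto-start the VCPU unless userspace
 * controls the CPU state itself, sync the register state from kvm_run,
 * run the guest, and translate pending signals, guest debug exits and
 * -EREMOTE into the corresponding kvm_run exit reasons.
 *
 * Illustrative sketch of the userspace side (not part of this file;
 * vcpu_fd and run are placeholders for the caller's VCPU fd and the
 * mmap'ed struct kvm_run):
 *
 *	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *		handle_error();
 *	switch (run->exit_reason) {
 *	case KVM_EXIT_S390_SIEIC:
 *		...
 *	}
 */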
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002614int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2615{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002616 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002617 sigset_t sigsaved;
2618
David Hildenbrand27291e22014-01-23 12:26:52 +01002619 if (guestdbg_exit_pending(vcpu)) {
2620 kvm_s390_prepare_debug_exit(vcpu);
2621 return 0;
2622 }
2623
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002624 if (vcpu->sigset_active)
2625 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2626
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002627 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2628 kvm_s390_vcpu_start(vcpu);
2629 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002630 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002631 vcpu->vcpu_id);
2632 return -EINVAL;
2633 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002634
David Hildenbrandb028ee32014-07-17 10:47:43 +02002635 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002636 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002637
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002638 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002639 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002640
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002641 if (signal_pending(current) && !rc) {
2642 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002643 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002644 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002645
David Hildenbrand27291e22014-01-23 12:26:52 +01002646 if (guestdbg_exit_pending(vcpu) && !rc) {
2647 kvm_s390_prepare_debug_exit(vcpu);
2648 rc = 0;
2649 }
2650
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002651 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002652 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002653 rc = 0;
2654 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002655
David Hildenbranddb0758b2016-02-15 09:42:25 +01002656 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002657 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002658
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002659 if (vcpu->sigset_active)
2660 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2661
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002662 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002663 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002664}
2665
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002666/*
2667 * store status at address
2668 * we have two special cases:
2669 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2670 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2671 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002672int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002673{
Carsten Otte092670c2011-07-24 10:48:22 +02002674 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002675 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02002676 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01002677 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002678 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002679
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002680 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002681 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2682 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002683 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002684 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002685 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2686 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002687 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002688 gpa = px;
2689 } else
2690 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002691
2692 /* manually convert vector registers if necessary */
2693 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01002694 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002695 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2696 fprs, 128);
2697 } else {
2698 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002699 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002700 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002701 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002702 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002703 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002704 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002705 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02002706 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002707 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002708 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002709 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002710 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01002711 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002712 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01002713 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002714 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002715 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002716 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002717 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002718 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002719 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002720 &vcpu->arch.sie_block->gcr, 128);
2721 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002722}
2723
Thomas Huthe8798922013-11-06 15:46:33 +01002724int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2725{
2726 /*
2727 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2728 * copying in vcpu load/put. Let's update our copies before we save
2729 * them into the save area.
2730 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002731 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002732 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01002733 save_access_regs(vcpu->run->s.regs.acrs);
2734
2735 return kvm_s390_store_status_unloaded(vcpu, addr);
2736}
2737
Eric Farmanbc17de72014-04-14 16:01:09 -04002738/*
2739 * store additional status at address
2740 */
2741int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2742 unsigned long gpa)
2743{
2744 /* Only bits 0-53 are used for address formation */
2745 if (!(gpa & ~0x3ff))
2746 return 0;
2747
2748 return write_guest_abs(vcpu, gpa & ~0x3ff,
2749 (void *)&vcpu->run->s.regs.vrs, 512);
2750}
2751
2752int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2753{
2754 if (!test_kvm_facility(vcpu->kvm, 129))
2755 return 0;
2756
2757 /*
2758 * The guest VXRS are in the host VXRS due to the lazy
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002759 * copying in vcpu load/put. We can simply call save_fpu_regs()
2760 * to save the current register state because we are in the
2761 * middle of a load/put cycle.
2762 *
2763 * Let's update our copies before we save them into the save area.
Eric Farmanbc17de72014-04-14 16:01:09 -04002764 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002765 save_fpu_regs();
Eric Farmanbc17de72014-04-14 16:01:09 -04002766
2767 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2768}
2769
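/*
 * IBS is only worthwhile while exactly one VCPU is running (see
 * kvm_s390_vcpu_start/stop below); these helpers queue the matching
 * ENABLE/DISABLE request and clear a still-pending opposite request.
 */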
David Hildenbrand8ad35752014-03-14 11:00:21 +01002770static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2771{
2772 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002773 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002774}
2775
2776static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2777{
2778 unsigned int i;
2779 struct kvm_vcpu *vcpu;
2780
2781 kvm_for_each_vcpu(i, vcpu, kvm) {
2782 __disable_ibs_on_vcpu(vcpu);
2783 }
2784}
2785
2786static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2787{
2788 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002789 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002790}
2791
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002792void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2793{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002794 int i, online_vcpus, started_vcpus = 0;
2795
2796 if (!is_vcpu_stopped(vcpu))
2797 return;
2798
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002799 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002800 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002801 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002802 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2803
2804 for (i = 0; i < online_vcpus; i++) {
2805 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2806 started_vcpus++;
2807 }
2808
2809 if (started_vcpus == 0) {
2810 /* we're the only active VCPU -> speed it up */
2811 __enable_ibs_on_vcpu(vcpu);
2812 } else if (started_vcpus == 1) {
2813 /*
2814 * As we are starting a second VCPU, we have to disable
2815 * the IBS facility on all VCPUs to remove potentially
2816 * outstanding ENABLE requests.
2817 */
2818 __disable_ibs_on_all_vcpus(vcpu->kvm);
2819 }
2820
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002821 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002822 /*
2823 * Another VCPU might have used IBS while we were offline.
2824 * Let's play safe and flush the VCPU at startup.
2825 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002826 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002827 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002828 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002829}
2830
2831void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2832{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002833 int i, online_vcpus, started_vcpus = 0;
2834 struct kvm_vcpu *started_vcpu = NULL;
2835
2836 if (is_vcpu_stopped(vcpu))
2837 return;
2838
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002839 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002840 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002841 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002842 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2843
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002844 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002845 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002846
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002847 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002848 __disable_ibs_on_vcpu(vcpu);
2849
2850 for (i = 0; i < online_vcpus; i++) {
2851 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2852 started_vcpus++;
2853 started_vcpu = vcpu->kvm->vcpus[i];
2854 }
2855 }
2856
2857 if (started_vcpus == 1) {
2858 /*
2859 * As we only have one VCPU left, we want to enable the
2860 * IBS facility for that VCPU to speed it up.
2861 */
2862 __enable_ibs_on_vcpu(started_vcpu);
2863 }
2864
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002865 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002866 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002867}
2868
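/*
 * Per-VCPU KVM_ENABLE_CAP handler; the only capability handled here is
 * KVM_CAP_S390_CSS_SUPPORT, which enables channel-subsystem support
 * for the whole VM.
 */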
Cornelia Huckd6712df2012-12-20 15:32:11 +01002869static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2870 struct kvm_enable_cap *cap)
2871{
2872 int r;
2873
2874 if (cap->flags)
2875 return -EINVAL;
2876
2877 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002878 case KVM_CAP_S390_CSS_SUPPORT:
2879 if (!vcpu->kvm->arch.css_support) {
2880 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002881 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002882 trace_kvm_s390_enable_css(vcpu->kvm);
2883 }
2884 r = 0;
2885 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002886 default:
2887 r = -EINVAL;
2888 break;
2889 }
2890 return r;
2891}
2892
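/*
 * Handler for the KVM_S390_MEM_OP vcpu ioctl: read or write guest
 * logical memory through a temporary kernel buffer, optionally only
 * checking accessibility (KVM_S390_MEMOP_F_CHECK_ONLY) and optionally
 * injecting the resulting program exception into the guest
 * (KVM_S390_MEMOP_F_INJECT_EXCEPTION).
 *
 * Illustrative sketch of a userspace read (not part of this file;
 * vcpu_fd, guest_addr, buf and len are placeholders):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	ret = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */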
Thomas Huth41408c282015-02-06 15:01:21 +01002893static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2894 struct kvm_s390_mem_op *mop)
2895{
2896 void __user *uaddr = (void __user *)mop->buf;
2897 void *tmpbuf = NULL;
2898 int r, srcu_idx;
2899 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2900 | KVM_S390_MEMOP_F_CHECK_ONLY;
2901
2902 if (mop->flags & ~supported_flags)
2903 return -EINVAL;
2904
2905 if (mop->size > MEM_OP_MAX_SIZE)
2906 return -E2BIG;
2907
2908 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2909 tmpbuf = vmalloc(mop->size);
2910 if (!tmpbuf)
2911 return -ENOMEM;
2912 }
2913
2914 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2915
2916 switch (mop->op) {
2917 case KVM_S390_MEMOP_LOGICAL_READ:
2918 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002919 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2920 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01002921 break;
2922 }
2923 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2924 if (r == 0) {
2925 if (copy_to_user(uaddr, tmpbuf, mop->size))
2926 r = -EFAULT;
2927 }
2928 break;
2929 case KVM_S390_MEMOP_LOGICAL_WRITE:
2930 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002931 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2932 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01002933 break;
2934 }
2935 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2936 r = -EFAULT;
2937 break;
2938 }
2939 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2940 break;
2941 default:
2942 r = -EINVAL;
2943 }
2944
2945 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2946
2947 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2948 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2949
2950 vfree(tmpbuf);
2951 return r;
2952}
2953
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002954long kvm_arch_vcpu_ioctl(struct file *filp,
2955 unsigned int ioctl, unsigned long arg)
2956{
2957 struct kvm_vcpu *vcpu = filp->private_data;
2958 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02002959 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03002960 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002961
Avi Kivity937366242010-05-13 12:35:17 +03002962 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01002963 case KVM_S390_IRQ: {
2964 struct kvm_s390_irq s390irq;
2965
2966 r = -EFAULT;
2967 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2968 break;
2969 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2970 break;
2971 }
Avi Kivity937366242010-05-13 12:35:17 +03002972 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002973 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02002974 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002975
Avi Kivity937366242010-05-13 12:35:17 +03002976 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002977 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity937366242010-05-13 12:35:17 +03002978 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02002979 if (s390int_to_s390irq(&s390int, &s390irq))
2980 return -EINVAL;
2981 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity937366242010-05-13 12:35:17 +03002982 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002983 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002984 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02002985 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002986 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02002987 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002988 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002989 case KVM_S390_SET_INITIAL_PSW: {
2990 psw_t psw;
2991
Avi Kivitybc923cc2010-05-13 12:21:46 +03002992 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002993 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03002994 break;
2995 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2996 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002997 }
2998 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03002999 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3000 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003001 case KVM_SET_ONE_REG:
3002 case KVM_GET_ONE_REG: {
3003 struct kvm_one_reg reg;
3004 r = -EFAULT;
3005 if (copy_from_user(&reg, argp, sizeof(reg)))
3006 break;
3007 if (ioctl == KVM_SET_ONE_REG)
3008 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3009 else
3010 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3011 break;
3012 }
Carsten Otte27e03932012-01-04 10:25:21 +01003013#ifdef CONFIG_KVM_S390_UCONTROL
3014 case KVM_S390_UCAS_MAP: {
3015 struct kvm_s390_ucas_mapping ucasmap;
3016
3017 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3018 r = -EFAULT;
3019 break;
3020 }
3021
3022 if (!kvm_is_ucontrol(vcpu->kvm)) {
3023 r = -EINVAL;
3024 break;
3025 }
3026
3027 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3028 ucasmap.vcpu_addr, ucasmap.length);
3029 break;
3030 }
3031 case KVM_S390_UCAS_UNMAP: {
3032 struct kvm_s390_ucas_mapping ucasmap;
3033
3034 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3035 r = -EFAULT;
3036 break;
3037 }
3038
3039 if (!kvm_is_ucontrol(vcpu->kvm)) {
3040 r = -EINVAL;
3041 break;
3042 }
3043
3044 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3045 ucasmap.length);
3046 break;
3047 }
3048#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003049 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003050 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003051 break;
3052 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003053 case KVM_ENABLE_CAP:
3054 {
3055 struct kvm_enable_cap cap;
3056 r = -EFAULT;
3057 if (copy_from_user(&cap, argp, sizeof(cap)))
3058 break;
3059 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3060 break;
3061 }
Thomas Huth41408c282015-02-06 15:01:21 +01003062 case KVM_S390_MEM_OP: {
3063 struct kvm_s390_mem_op mem_op;
3064
3065 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3066 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3067 else
3068 r = -EFAULT;
3069 break;
3070 }
Jens Freimann816c7662014-11-24 17:13:46 +01003071 case KVM_S390_SET_IRQ_STATE: {
3072 struct kvm_s390_irq_state irq_state;
3073
3074 r = -EFAULT;
3075 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3076 break;
3077 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3078 irq_state.len == 0 ||
3079 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3080 r = -EINVAL;
3081 break;
3082 }
3083 r = kvm_s390_set_irq_state(vcpu,
3084 (void __user *) irq_state.buf,
3085 irq_state.len);
3086 break;
3087 }
3088 case KVM_S390_GET_IRQ_STATE: {
3089 struct kvm_s390_irq_state irq_state;
3090
3091 r = -EFAULT;
3092 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3093 break;
3094 if (irq_state.len == 0) {
3095 r = -EINVAL;
3096 break;
3097 }
3098 r = kvm_s390_get_irq_state(vcpu,
3099 (__u8 __user *) irq_state.buf,
3100 irq_state.len);
3101 break;
3102 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003103 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003104 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003105 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003106 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003107}
3108
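/*
 * For user controlled (ucontrol) VMs, allow userspace to mmap the SIE
 * control block of the VCPU at offset KVM_S390_SIE_PAGE_OFFSET; all
 * other faults on the vcpu fd mapping raise SIGBUS.
 */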
Carsten Otte5b1c1492012-01-04 10:25:23 +01003109int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3110{
3111#ifdef CONFIG_KVM_S390_UCONTROL
3112 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3113 && (kvm_is_ucontrol(vcpu->kvm))) {
3114 vmf->page = virt_to_page(vcpu->arch.sie_block);
3115 get_page(vmf->page);
3116 return 0;
3117 }
3118#endif
3119 return VM_FAULT_SIGBUS;
3120}
3121
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303122int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3123 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003124{
3125 return 0;
3126}
3127
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003128/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003129int kvm_arch_prepare_memory_region(struct kvm *kvm,
3130 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003131 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003132 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003133{
Nick Wangdd2887e2013-03-25 17:22:57 +01003134 /* A few sanity checks. Memory slots have to start and end on a
3135 segment boundary (1 MB). The memory in userland may be fragmented
3136 into various different vmas. It is okay to mmap() and munmap() stuff
3137 in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003138
Carsten Otte598841c2011-07-24 10:48:21 +02003139 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003140 return -EINVAL;
3141
Carsten Otte598841c2011-07-24 10:48:21 +02003142 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003143 return -EINVAL;
3144
Dominik Dingela3a92c32014-12-01 17:24:42 +01003145 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3146 return -EINVAL;
3147
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003148 return 0;
3149}
3150
3151void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003152 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003153 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003154 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003155 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003156{
Carsten Ottef7850c92011-07-24 10:48:23 +02003157 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003158
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003159 /* If the basics of the memslot do not change, we do not want
3160 * to update the gmap. Every update causes several unnecessary
3161 * segment translation exceptions. This is usually handled just
3162 * fine by the normal fault handler + gmap, but it will also
3163 * cause faults on the prefix page of running guest CPUs.
3164 */
3165 if (old->userspace_addr == mem->userspace_addr &&
3166 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3167 old->npages * PAGE_SIZE == mem->memory_size)
3168 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003169
3170 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3171 mem->guest_phys_addr, mem->memory_size);
3172 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003173 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003174 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003175}
3176
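/*
 * Derive from the SCLP hmfai field which facility bits of facility
 * double word 'i' may be used without hypervisor support; the
 * resulting mask is applied to the host facility list in
 * kvm_s390_init() below.
 */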
Alexander Yarygin60a37702016-04-01 15:38:57 +03003177static inline unsigned long nonhyp_mask(int i)
3178{
3179 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3180
3181 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3182}
3183
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003184void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3185{
3186 vcpu->valid_wakeup = false;
3187}
3188
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003189static int __init kvm_s390_init(void)
3190{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003191 int i;
3192
David Hildenbrand07197fd2015-01-30 16:01:38 +01003193 if (!sclp.has_sief2) {
3194 pr_info("SIE not available\n");
3195 return -ENODEV;
3196 }
3197
Alexander Yarygin60a37702016-04-01 15:38:57 +03003198 for (i = 0; i < 16; i++)
3199 kvm_s390_fac_list_mask[i] |=
3200 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3201
Michael Mueller9d8d5782015-02-02 15:42:51 +01003202 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003203}
3204
3205static void __exit kvm_s390_exit(void)
3206{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003207 kvm_exit();
3208}
3209
3210module_init(kvm_s390_init);
3211module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003212
3213/*
3214 * Enable autoloading of the kvm module.
3215 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3216 * since x86 takes a different approach.
3217 */
3218#include <linux/miscdevice.h>
3219MODULE_ALIAS_MISCDEV(KVM_MINOR);
3220MODULE_ALIAS("devname:kvm");