/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/etr.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

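/*
 * Check whether a given PERFORM LOCKED OPERATION (PLO) subfunction is
 * available by issuing PLO in "test bit" mode and evaluating the
 * resulting condition code.
 */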
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

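/*
 * Probe which CPU (sub)functions the host provides for guests: PLO
 * subfunctions, TOD-clock steering (PTFF) and the CPACF crypto query
 * functions, plus CPU features such as ESOP.
 */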
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

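/*
 * Transfer the dirty state of all pages of a memory slot from the host
 * page tables into the KVM dirty bitmap; bails out early on a fatal
 * signal.
 */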
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

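/* Enable an optional VM capability requested by user space. */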
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

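/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group: enable or reset CMMA
 * and adjust the guest memory limit while no VCPUs have been created.
 */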
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

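/*
 * Toggle AES/DEA key wrapping for the VM, then refresh the crypto setup
 * of every VCPU and kick it out of SIE so the change takes effect.
 */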
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

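/*
 * Set the guest CPU model (cpuid, IBC and facility list) from user
 * space; only possible as long as no VCPU has been created.
 */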
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

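/*
 * Read the storage keys of a range of guest pages and copy them to
 * user space.
 */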
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

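/*
 * Query the AP configuration information (PQAP with the QCI function
 * code) into the 128 byte buffer @config and return the condition code.
 */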
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

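/* Free the basic or extended system control area (SCA) of the VM. */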
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

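/*
 * Create the architecture specific parts of a VM: SCA, debug feature,
 * CPU model and crypto setup, interrupt lists and the guest address
 * space.
 */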
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001409 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001410out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001411 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01001412 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001413 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001414 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001415 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001416}
1417
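/*
 * Free all per-vcpu resources: local interrupts, the async_pf queue,
 * the SCA entry (or the private gmap for ucontrol guests), CMMA state
 * and the SIE control block itself.
 */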
Christian Borntraegerd329c032008-11-26 14:50:27 +01001418void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1419{
1420 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001421 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001422 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001423 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001424 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001425 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01001426
1427 if (kvm_is_ucontrol(vcpu->kvm))
1428 gmap_free(vcpu->arch.gmap);
1429
Dominik Dingele6db1d62015-05-07 15:41:57 +02001430 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001431 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001432 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001433
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001434 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001435 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001436}
1437
1438static void kvm_free_vcpus(struct kvm *kvm)
1439{
1440 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001441 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001442
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001443 kvm_for_each_vcpu(i, vcpu, kvm)
1444 kvm_arch_vcpu_destroy(vcpu);
1445
1446 mutex_lock(&kvm->lock);
1447 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1448 kvm->vcpus[i] = NULL;
1449
1450 atomic_set(&kvm->online_vcpus, 0);
1451 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001452}
1453
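/*
 * Tear down a VM: all vcpus, the SCA, the debug feature, sie_page2,
 * I/O adapters, floating interrupts and (for non-ucontrol VMs) the gmap.
 */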
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001454void kvm_arch_destroy_vm(struct kvm *kvm)
1455{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001456 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001457 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001458 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001459 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01001460 if (!kvm_is_ucontrol(kvm))
1461 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001462 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001463 kvm_s390_clear_float_irqs(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001464 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001465}
1466
1467/* Section: vcpu related */
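/* ucontrol guests get a private per-vcpu gmap instead of the VM-wide one */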
Dominik Dingeldafd0322014-12-02 16:53:21 +01001468static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1469{
1470 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1471 if (!vcpu->arch.gmap)
1472 return -ENOMEM;
1473 vcpu->arch.gmap->private = vcpu->kvm;
1474
1475 return 0;
1476}
1477
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001478static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1479{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001480 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001481 if (vcpu->kvm->arch.use_esca) {
1482 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001483
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001484 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001485 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001486 } else {
1487 struct bsca_block *sca = vcpu->kvm->arch.sca;
1488
1489 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001490 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001491 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001492 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001493}
1494
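/*
 * Enter the vcpu's SIE block into the basic or extended SCA and let the
 * SIE block point back at the SCA origin via scaoh/scaol.
 */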
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001495static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001496{
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001497 read_lock(&vcpu->kvm->arch.sca_lock);
1498 if (vcpu->kvm->arch.use_esca) {
1499 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001500
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001501 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001502 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1503 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand25508822015-10-12 16:27:23 +02001504 vcpu->arch.sie_block->ecb2 |= 0x04U;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001505 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001506 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001507 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001508
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001509 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001510 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1511 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001512 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001513 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001514 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001515}
1516
1517/* Basic SCA to Extended SCA data copy routines */
1518static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1519{
1520 d->sda = s->sda;
1521 d->sigp_ctrl.c = s->sigp_ctrl.c;
1522 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1523}
1524
1525static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1526{
1527 int i;
1528
1529 d->ipte_control = s->ipte_control;
1530 d->mcn[0] = s->mcn;
1531 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1532 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1533}
1534
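/*
 * Replace the basic SCA with an extended SCA at runtime. All vcpus are
 * blocked and the SCA lock is held for writing while the entries are
 * copied and every SIE block is rewired to the new origin, so no cpu
 * can run with a stale SCA pointer.
 */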
1535static int sca_switch_to_extended(struct kvm *kvm)
1536{
1537 struct bsca_block *old_sca = kvm->arch.sca;
1538 struct esca_block *new_sca;
1539 struct kvm_vcpu *vcpu;
1540 unsigned int vcpu_idx;
1541 u32 scaol, scaoh;
1542
1543 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1544 if (!new_sca)
1545 return -ENOMEM;
1546
1547 scaoh = (u32)((u64)(new_sca) >> 32);
1548 scaol = (u32)(u64)(new_sca) & ~0x3fU;
1549
1550 kvm_s390_vcpu_block_all(kvm);
1551 write_lock(&kvm->arch.sca_lock);
1552
1553 sca_copy_b_to_e(new_sca, old_sca);
1554
1555 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1556 vcpu->arch.sie_block->scaoh = scaoh;
1557 vcpu->arch.sie_block->scaol = scaol;
1558 vcpu->arch.sie_block->ecb2 |= 0x04U;
1559 }
1560 kvm->arch.sca = new_sca;
1561 kvm->arch.use_esca = 1;
1562
1563 write_unlock(&kvm->arch.sca_lock);
1564 kvm_s390_vcpu_unblock_all(kvm);
1565
1566 free_page((unsigned long)old_sca);
1567
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001568 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1569 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001570 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001571}
1572
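/*
 * A vcpu id beyond the basic SCA slots requires the extended SCA;
 * switch over on demand if the machine supports it.
 */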
1573static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1574{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001575 int rc;
1576
1577 if (id < KVM_S390_BSCA_CPU_SLOTS)
1578 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001579 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001580 return false;
1581
1582 mutex_lock(&kvm->lock);
1583 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1584 mutex_unlock(&kvm->lock);
1585
1586 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001587}
1588
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001589int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1590{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001591 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1592 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001593 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1594 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001595 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001596 KVM_SYNC_CRS |
1597 KVM_SYNC_ARCH0 |
1598 KVM_SYNC_PFAULT;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001599 if (test_kvm_facility(vcpu->kvm, 64))
1600 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01001601 /* fprs can be synchronized via vrs, even if the guest has no vx. With
1602 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
1603 */
1604 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04001605 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001606 else
1607 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001608
1609 if (kvm_is_ucontrol(vcpu->kvm))
1610 return __kvm_ucontrol_vcpu_init(vcpu);
1611
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001612 return 0;
1613}
1614
David Hildenbranddb0758b2016-02-15 09:42:25 +01001615/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
1616static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1617{
1618 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01001619 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001620 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01001621 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001622}
1623
1624/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
1625static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1626{
1627 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01001628 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001629 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1630 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001631 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001632}
1633
1634/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
1635static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1636{
1637 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
1638 vcpu->arch.cputm_enabled = true;
1639 __start_cpu_timer_accounting(vcpu);
1640}
1641
1642/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
1643static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1644{
1645 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
1646 __stop_cpu_timer_accounting(vcpu);
1647 vcpu->arch.cputm_enabled = false;
1648}
1649
1650static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1651{
1652 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1653 __enable_cpu_timer_accounting(vcpu);
1654 preempt_enable();
1655}
1656
1657static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
1658{
1659 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1660 __disable_cpu_timer_accounting(vcpu);
1661 preempt_enable();
1662}
1663
David Hildenbrand4287f242016-02-15 09:40:12 +01001664/* set the cpu timer - may only be called from the VCPU thread itself */
1665void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
1666{
David Hildenbranddb0758b2016-02-15 09:42:25 +01001667 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01001668 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001669 if (vcpu->arch.cputm_enabled)
1670 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01001671 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001672 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01001673 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01001674}
1675
David Hildenbranddb0758b2016-02-15 09:42:25 +01001676/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01001677__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
1678{
David Hildenbrand9c23a132016-02-17 21:53:33 +01001679 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001680 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01001681
1682 if (unlikely(!vcpu->arch.cputm_enabled))
1683 return vcpu->arch.sie_block->cputm;
1684
David Hildenbrand9c23a132016-02-17 21:53:33 +01001685 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
1686 do {
1687 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
1688 /*
1689 * If the writer would ever execute a read in the critical
1690 * section, e.g. in irq context, we have a deadlock.
1691 */
1692 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
1693 value = vcpu->arch.sie_block->cputm;
1694 /* if cputm_start is 0, accounting is being started/stopped */
1695 if (likely(vcpu->arch.cputm_start))
1696 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
1697 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
1698 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01001699 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01001700}
1701
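/*
 * Load the vcpu onto this host cpu: stash the host FPU/vector and access
 * registers, make the guest ones current, enable the gmap and resume
 * CPU timer accounting.
 */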
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001702void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1703{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001704 /* Save host register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001705 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001706 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
1707 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001708
David Hildenbrand6fd8e672016-01-18 14:46:34 +01001709 if (MACHINE_HAS_VX)
1710 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
1711 else
1712 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001713 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001714 if (test_fp_ctl(current->thread.fpu.fpc))
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001715 /* User space provided an invalid FPC, let's clear it */
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001716 current->thread.fpu.fpc = 0;
1717
1718 save_access_regs(vcpu->arch.host_acrs);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001719 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001720 gmap_enable(vcpu->arch.gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001721 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01001722 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01001723 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01001724 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001725}
1726
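/* Counterpart of kvm_arch_vcpu_load: swap guest register state back out. */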
1727void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1728{
David Hildenbrand01a745a2016-02-12 20:41:56 +01001729 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01001730 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01001731 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001732 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001733 gmap_disable(vcpu->arch.gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001734
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001735 /* Save guest register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001736 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001737 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001738
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001739 /* Restore host register state */
1740 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
1741 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001742
1743 save_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001744 restore_access_regs(vcpu->arch.host_acrs);
1745}
1746
1747static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1748{
1749	/* this equals initial cpu reset in the POP, but we don't switch to ESA */
1750 vcpu->arch.sie_block->gpsw.mask = 0UL;
1751 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01001752 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01001753 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001754 vcpu->arch.sie_block->ckc = 0UL;
1755 vcpu->arch.sie_block->todpr = 0;
1756 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1757 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1758 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001759 /* make sure the new fpc will be lazily loaded */
1760 save_fpu_regs();
1761 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001762 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001763 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001764 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1765 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001766 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1767 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001768 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001769}
1770
Dominik Dingel31928aa2014-12-04 15:47:07 +01001771void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001772{
Jason J. Herne72f25022014-11-25 09:46:02 -05001773 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02001774 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001775 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02001776 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001777 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02001778 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01001779 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02001780 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02001781 }
1782
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001783}
1784
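/*
 * Propagate the VM-wide AES/DEA key-wrapping configuration into this
 * vcpu's SIE control block (only if facility 76 is available).
 */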
Tony Krowiak5102ee82014-06-27 14:46:01 -04001785static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1786{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001787 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001788 return;
1789
Tony Krowiaka374e892014-09-03 10:13:53 +02001790 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1791
1792 if (vcpu->kvm->arch.crypto.aes_kw)
1793 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1794 if (vcpu->kvm->arch.crypto.dea_kw)
1795 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1796
Tony Krowiak5102ee82014-06-27 14:46:01 -04001797 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1798}
1799
Dominik Dingelb31605c2014-03-25 13:47:11 +01001800void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1801{
1802 free_page(vcpu->arch.sie_block->cbrlo);
1803 vcpu->arch.sie_block->cbrlo = 0;
1804}
1805
1806int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1807{
1808 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1809 if (!vcpu->arch.sie_block->cbrlo)
1810 return -ENOMEM;
1811
1812 vcpu->arch.sie_block->ecb2 |= 0x80;
1813 vcpu->arch.sie_block->ecb2 &= ~0x08;
1814 return 0;
1815}
1816
Michael Mueller91520f12015-02-27 14:32:11 +01001817static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1818{
1819 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1820
Michael Mueller91520f12015-02-27 14:32:11 +01001821 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01001822 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001823 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01001824}
1825
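/*
 * Final vcpu setup: derive the SIE control block flags (ecb/eca/ecd and
 * friends) from the host capabilities and the configured cpu model, set
 * up CMMA if in use, the clock comparator timer and crypto.
 */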
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001826int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1827{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001828 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001829
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001830 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1831 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001832 CPUSTAT_STOPPED);
1833
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001834 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001835 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001836 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001837 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001838
Michael Mueller91520f12015-02-27 14:32:11 +01001839 kvm_s390_vcpu_setup_model(vcpu);
1840
David Hildenbrandbdab09f2016-04-12 11:07:49 +02001841 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1842 if (MACHINE_HAS_ESOP)
1843 vcpu->arch.sie_block->ecb |= 0x02;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01001844 if (test_kvm_facility(vcpu->kvm, 9))
1845 vcpu->arch.sie_block->ecb |= 0x04;
David Hildenbrandf597d242016-04-22 16:26:49 +02001846 if (test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001847 vcpu->arch.sie_block->ecb |= 0x10;
1848
David Hildenbrand873b4252016-04-04 15:53:47 +02001849 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrandd6af0b42016-03-04 11:55:56 +01001850 vcpu->arch.sie_block->ecb2 |= 0x08;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02001851 vcpu->arch.sie_block->eca = 0x1002000U;
1852 if (sclp.has_cei)
1853 vcpu->arch.sie_block->eca |= 0x80000000U;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02001854 if (sclp.has_ib)
1855 vcpu->arch.sie_block->eca |= 0x40000000U;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001856 if (sclp.has_siif)
Heiko Carstens217a4402013-12-30 12:54:14 +01001857 vcpu->arch.sie_block->eca |= 1;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001858 if (sclp.has_sigpif)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001859 vcpu->arch.sie_block->eca |= 0x10000000U;
Fan Zhangc6e5f162016-01-07 18:24:29 +08001860 if (test_kvm_facility(vcpu->kvm, 64))
1861 vcpu->arch.sie_block->ecb3 |= 0x01;
Michael Mueller18280d82015-03-16 16:05:41 +01001862 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman13211ea2014-04-30 13:39:46 -04001863 vcpu->arch.sie_block->eca |= 0x00020000;
1864 vcpu->arch.sie_block->ecd |= 0x20000000;
1865 }
Fan Zhangc6e5f162016-01-07 18:24:29 +08001866 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Thomas Huth492d8642015-02-10 16:11:01 +01001867 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001868 if (test_kvm_facility(vcpu->kvm, 74))
1869 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001870
Dominik Dingele6db1d62015-05-07 15:41:57 +02001871 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01001872 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1873 if (rc)
1874 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001875 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001876 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001877 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001878
Tony Krowiak5102ee82014-06-27 14:46:01 -04001879 kvm_s390_vcpu_crypto_setup(vcpu);
1880
Dominik Dingelb31605c2014-03-25 13:47:11 +01001881 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001882}
1883
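/*
 * Allocate and wire up a new vcpu: SIE block and ITDB share one sie_page,
 * local interrupt state and the cpu timer seqcount are initialized before
 * the generic vcpu init runs.
 */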
1884struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1885 unsigned int id)
1886{
Carsten Otte4d475552011-10-18 12:27:12 +02001887 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001888 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001889 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001890
David Hildenbrand42158252015-10-12 12:57:22 +02001891 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02001892 goto out;
1893
1894 rc = -ENOMEM;
1895
Michael Muellerb110fea2013-06-12 13:54:54 +02001896 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001897 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001898 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001899
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001900 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1901 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001902 goto out_free_cpu;
1903
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001904 vcpu->arch.sie_block = &sie_page->sie_block;
1905 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1906
David Hildenbrandefed1102015-04-16 12:32:41 +02001907 /* the real guest size will always be smaller than msl */
1908 vcpu->arch.sie_block->mso = 0;
1909 vcpu->arch.sie_block->msl = sclp.hamax;
1910
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001911 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001912 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001913 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001914 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001915 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01001916 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001917
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001918 rc = kvm_vcpu_init(vcpu, kvm, id);
1919 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01001920 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001921 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001922 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001923 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001924
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001925 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001926out_free_sie_block:
1927 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001928out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001929 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001930out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001931 return ERR_PTR(rc);
1932}
1933
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001934int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1935{
David Hildenbrand9a022062014-08-05 17:40:47 +02001936 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001937}
1938
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001939void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001940{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001941 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001942 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001943}
1944
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001945void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001946{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001947 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001948}
1949
Christian Borntraeger8e236542015-04-09 13:49:04 +02001950static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1951{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001952 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001953 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001954}
1955
1956static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1957{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04001958 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001959}
1960
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001961/*
1962 * Kick a guest cpu out of SIE and wait until SIE is not running.
1963 * If the CPU is not running (e.g. waiting as idle) the function will
1964 * return immediately. */
1965void exit_sie(struct kvm_vcpu *vcpu)
1966{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001967 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001968 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1969 cpu_relax();
1970}
1971
Christian Borntraeger8e236542015-04-09 13:49:04 +02001972/* Kick a guest cpu out of SIE to process a request synchronously */
1973void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001974{
Christian Borntraeger8e236542015-04-09 13:49:04 +02001975 kvm_make_request(req, vcpu);
1976 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001977}
1978
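/*
 * gmap notifier: when a vcpu's prefix pages are invalidated, request an
 * MMU reload so the prefix is remapped before that vcpu reenters SIE.
 */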
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001979static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1980{
1981 int i;
1982 struct kvm *kvm = gmap->private;
1983 struct kvm_vcpu *vcpu;
1984
1985 kvm_for_each_vcpu(i, vcpu, kvm) {
1986 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +02001987 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001988 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001989 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001990 }
1991 }
1992}
1993
Christoffer Dallb6d33832012-03-08 16:44:24 -05001994int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1995{
1996 /* kvm common code refers to this, but never calls it */
1997 BUG();
1998 return 0;
1999}
2000
Carsten Otte14eebd92012-05-15 14:15:26 +02002001static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2002 struct kvm_one_reg *reg)
2003{
2004 int r = -EINVAL;
2005
2006 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002007 case KVM_REG_S390_TODPR:
2008 r = put_user(vcpu->arch.sie_block->todpr,
2009 (u32 __user *)reg->addr);
2010 break;
2011 case KVM_REG_S390_EPOCHDIFF:
2012 r = put_user(vcpu->arch.sie_block->epoch,
2013 (u64 __user *)reg->addr);
2014 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002015 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002016 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002017 (u64 __user *)reg->addr);
2018 break;
2019 case KVM_REG_S390_CLOCK_COMP:
2020 r = put_user(vcpu->arch.sie_block->ckc,
2021 (u64 __user *)reg->addr);
2022 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002023 case KVM_REG_S390_PFTOKEN:
2024 r = put_user(vcpu->arch.pfault_token,
2025 (u64 __user *)reg->addr);
2026 break;
2027 case KVM_REG_S390_PFCOMPARE:
2028 r = put_user(vcpu->arch.pfault_compare,
2029 (u64 __user *)reg->addr);
2030 break;
2031 case KVM_REG_S390_PFSELECT:
2032 r = put_user(vcpu->arch.pfault_select,
2033 (u64 __user *)reg->addr);
2034 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002035 case KVM_REG_S390_PP:
2036 r = put_user(vcpu->arch.sie_block->pp,
2037 (u64 __user *)reg->addr);
2038 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002039 case KVM_REG_S390_GBEA:
2040 r = put_user(vcpu->arch.sie_block->gbea,
2041 (u64 __user *)reg->addr);
2042 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002043 default:
2044 break;
2045 }
2046
2047 return r;
2048}
2049
2050static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2051 struct kvm_one_reg *reg)
2052{
2053 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002054 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002055
2056 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002057 case KVM_REG_S390_TODPR:
2058 r = get_user(vcpu->arch.sie_block->todpr,
2059 (u32 __user *)reg->addr);
2060 break;
2061 case KVM_REG_S390_EPOCHDIFF:
2062 r = get_user(vcpu->arch.sie_block->epoch,
2063 (u64 __user *)reg->addr);
2064 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002065 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002066 r = get_user(val, (u64 __user *)reg->addr);
2067 if (!r)
2068 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002069 break;
2070 case KVM_REG_S390_CLOCK_COMP:
2071 r = get_user(vcpu->arch.sie_block->ckc,
2072 (u64 __user *)reg->addr);
2073 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002074 case KVM_REG_S390_PFTOKEN:
2075 r = get_user(vcpu->arch.pfault_token,
2076 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002077 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2078 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002079 break;
2080 case KVM_REG_S390_PFCOMPARE:
2081 r = get_user(vcpu->arch.pfault_compare,
2082 (u64 __user *)reg->addr);
2083 break;
2084 case KVM_REG_S390_PFSELECT:
2085 r = get_user(vcpu->arch.pfault_select,
2086 (u64 __user *)reg->addr);
2087 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002088 case KVM_REG_S390_PP:
2089 r = get_user(vcpu->arch.sie_block->pp,
2090 (u64 __user *)reg->addr);
2091 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002092 case KVM_REG_S390_GBEA:
2093 r = get_user(vcpu->arch.sie_block->gbea,
2094 (u64 __user *)reg->addr);
2095 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002096 default:
2097 break;
2098 }
2099
2100 return r;
2101}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002102
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002103static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2104{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002105 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002106 return 0;
2107}
2108
2109int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2110{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002111 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002112 return 0;
2113}
2114
2115int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2116{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002117 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002118 return 0;
2119}
2120
2121int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2122 struct kvm_sregs *sregs)
2123{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002124 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002125 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christian Borntraeger59674c12012-01-11 11:20:33 +01002126 restore_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002127 return 0;
2128}
2129
2130int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2131 struct kvm_sregs *sregs)
2132{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002133 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002134 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002135 return 0;
2136}
2137
2138int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2139{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002140 /* make sure the new values will be lazily loaded */
2141 save_fpu_regs();
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002142 if (test_fp_ctl(fpu->fpc))
2143 return -EINVAL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002144 current->thread.fpu.fpc = fpu->fpc;
2145 if (MACHINE_HAS_VX)
2146 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2147 else
2148 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002149 return 0;
2150}
2151
2152int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2153{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002154 /* make sure we have the latest values */
2155 save_fpu_regs();
2156 if (MACHINE_HAS_VX)
2157 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2158 else
2159 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2160 fpu->fpc = current->thread.fpu.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002161 return 0;
2162}
2163
2164static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2165{
2166 int rc = 0;
2167
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002168 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002169 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002170 else {
2171 vcpu->run->psw_mask = psw.mask;
2172 vcpu->run->psw_addr = psw.addr;
2173 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002174 return rc;
2175}
2176
2177int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2178 struct kvm_translation *tr)
2179{
2180 return -EINVAL; /* not implemented yet */
2181}
2182
David Hildenbrand27291e22014-01-23 12:26:52 +01002183#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2184 KVM_GUESTDBG_USE_HW_BP | \
2185 KVM_GUESTDBG_ENABLE)
2186
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002187int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2188 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002189{
David Hildenbrand27291e22014-01-23 12:26:52 +01002190 int rc = 0;
2191
2192 vcpu->guest_debug = 0;
2193 kvm_s390_clear_bp_data(vcpu);
2194
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002195 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002196 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002197 if (!sclp.has_gpere)
2198 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002199
2200 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2201 vcpu->guest_debug = dbg->control;
2202 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002203 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002204
2205 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2206 rc = kvm_s390_import_bp_data(vcpu, dbg);
2207 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002208 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002209 vcpu->arch.guestdbg.last_bp = 0;
2210 }
2211
2212 if (rc) {
2213 vcpu->guest_debug = 0;
2214 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002215 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002216 }
2217
2218 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002219}
2220
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002221int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2222 struct kvm_mp_state *mp_state)
2223{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002224 /* CHECK_STOP and LOAD are not supported yet */
2225 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2226 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002227}
2228
2229int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2230 struct kvm_mp_state *mp_state)
2231{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002232 int rc = 0;
2233
2234 /* user space knows about this interface - let it control the state */
2235 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2236
2237 switch (mp_state->mp_state) {
2238 case KVM_MP_STATE_STOPPED:
2239 kvm_s390_vcpu_stop(vcpu);
2240 break;
2241 case KVM_MP_STATE_OPERATING:
2242 kvm_s390_vcpu_start(vcpu);
2243 break;
2244 case KVM_MP_STATE_LOAD:
2245 case KVM_MP_STATE_CHECK_STOP:
2246 /* fall through - CHECK_STOP and LOAD are not supported yet */
2247 default:
2248 rc = -ENXIO;
2249 }
2250
2251 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002252}
2253
David Hildenbrand8ad35752014-03-14 11:00:21 +01002254static bool ibs_enabled(struct kvm_vcpu *vcpu)
2255{
2256 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2257}
2258
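/* Process pending vcpu requests: prefix remap, TLB flush, IBS toggling. */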
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002259static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2260{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002261retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002262 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002263 if (!vcpu->requests)
2264 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002265 /*
2266 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2267 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2268 * This ensures that the ipte instruction for this request has
2269 * already finished. We might race against a second unmapper that
2270	 * wants to set the blocking bit. Let's just retry the request loop.
2271 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002272 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002273 int rc;
2274 rc = gmap_ipte_notify(vcpu->arch.gmap,
Michael Muellerfda902c2014-05-13 16:58:30 +02002275 kvm_s390_get_prefix(vcpu),
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002276 PAGE_SIZE * 2);
2277 if (rc)
2278 return rc;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002279 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002280 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002281
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002282 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2283 vcpu->arch.sie_block->ihcpu = 0xffff;
2284 goto retry;
2285 }
2286
David Hildenbrand8ad35752014-03-14 11:00:21 +01002287 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2288 if (!ibs_enabled(vcpu)) {
2289 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002290 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002291 &vcpu->arch.sie_block->cpuflags);
2292 }
2293 goto retry;
2294 }
2295
2296 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2297 if (ibs_enabled(vcpu)) {
2298 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002299 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002300 &vcpu->arch.sie_block->cpuflags);
2301 }
2302 goto retry;
2303 }
2304
David Hildenbrand0759d062014-05-13 16:54:32 +02002305 /* nothing to do, just clear the request */
2306 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2307
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002308 return 0;
2309}
2310
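/*
 * Set the guest TOD clock: store the epoch delta VM-wide and propagate
 * it to every vcpu while all of them are blocked out of SIE.
 */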
David Hildenbrand25ed1672015-05-12 09:49:14 +02002311void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2312{
2313 struct kvm_vcpu *vcpu;
2314 int i;
2315
2316 mutex_lock(&kvm->lock);
2317 preempt_disable();
2318 kvm->arch.epoch = tod - get_tod_clock();
2319 kvm_s390_vcpu_block_all(kvm);
2320 kvm_for_each_vcpu(i, vcpu, kvm)
2321 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2322 kvm_s390_vcpu_unblock_all(kvm);
2323 preempt_enable();
2324 mutex_unlock(&kvm->lock);
2325}
2326
Thomas Huthfa576c52014-05-06 17:20:16 +02002327/**
2328 * kvm_arch_fault_in_page - fault-in guest page if necessary
2329 * @vcpu: The corresponding virtual cpu
2330 * @gpa: Guest physical address
2331 * @writable: Whether the page should be writable or not
2332 *
2333 * Make sure that a guest page has been faulted-in on the host.
2334 *
2335 * Return: Zero on success, negative error code otherwise.
2336 */
2337long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002338{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002339 return gmap_fault(vcpu->arch.gmap, gpa,
2340 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002341}
2342
Dominik Dingel3c038e62013-10-07 17:11:48 +02002343static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2344 unsigned long token)
2345{
2346 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002347 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002348
2349 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002350 irq.u.ext.ext_params2 = token;
2351 irq.type = KVM_S390_INT_PFAULT_INIT;
2352 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002353 } else {
2354 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002355 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002356 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2357 }
2358}
2359
2360void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2361 struct kvm_async_pf *work)
2362{
2363 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2364 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2365}
2366
2367void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2368 struct kvm_async_pf *work)
2369{
2370 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2371 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2372}
2373
2374void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2375 struct kvm_async_pf *work)
2376{
2377 /* s390 will always inject the page directly */
2378}
2379
2380bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2381{
2382 /*
2383 * s390 will always inject the page directly,
2384	 * but we still want check_async_completion to clean up
2385 */
2386 return true;
2387}
2388
2389static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2390{
2391 hva_t hva;
2392 struct kvm_arch_async_pf arch;
2393 int rc;
2394
2395 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2396 return 0;
2397 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2398 vcpu->arch.pfault_compare)
2399 return 0;
2400 if (psw_extint_disabled(vcpu))
2401 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002402 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002403 return 0;
2404 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2405 return 0;
2406 if (!vcpu->arch.gmap->pfault_enabled)
2407 return 0;
2408
Heiko Carstens81480cc2014-01-01 16:36:07 +01002409 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2410 hva += current->thread.gmap_addr & ~PAGE_MASK;
2411 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002412 return 0;
2413
2414 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2415 return rc;
2416}
2417
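/*
 * Work done before each SIE entry: async_pf housekeeping, pending machine
 * checks, interrupt delivery, request handling and guest debug patching.
 */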
Thomas Huth3fb4c402013-09-12 10:33:43 +02002418static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002419{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002420 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002421
Dominik Dingel3c038e62013-10-07 17:11:48 +02002422 /*
2423 * On s390 notifications for arriving pages will be delivered directly
2424	 * to the guest but the housekeeping for completed pfaults is
2425 * handled outside the worker.
2426 */
2427 kvm_check_async_pf_completion(vcpu);
2428
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002429 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2430 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002431
2432 if (need_resched())
2433 schedule();
2434
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002435 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002436 s390_handle_mcck();
2437
Jens Freimann79395032014-04-17 10:10:30 +02002438 if (!kvm_is_ucontrol(vcpu->kvm)) {
2439 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2440 if (rc)
2441 return rc;
2442 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002443
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002444 rc = kvm_s390_handle_requests(vcpu);
2445 if (rc)
2446 return rc;
2447
David Hildenbrand27291e22014-01-23 12:26:52 +01002448 if (guestdbg_enabled(vcpu)) {
2449 kvm_s390_backup_guest_per_regs(vcpu);
2450 kvm_s390_patch_guest_per_regs(vcpu);
2451 }
2452
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002453 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002454 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2455 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2456 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002457
Thomas Huth3fb4c402013-09-12 10:33:43 +02002458 return 0;
2459}
2460
Thomas Huth492d8642015-02-10 16:11:01 +01002461static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2462{
David Hildenbrand56317922016-01-12 17:37:58 +01002463 struct kvm_s390_pgm_info pgm_info = {
2464 .code = PGM_ADDRESSING,
2465 };
2466 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002467 int rc;
2468
2469 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2470 trace_kvm_s390_sie_fault(vcpu);
2471
2472 /*
2473 * We want to inject an addressing exception, which is defined as a
2474 * suppressing or terminating exception. However, since we came here
2475 * by a DAT access exception, the PSW still points to the faulting
2476 * instruction since DAT exceptions are nullifying. So we've got
2477 * to look up the current opcode to get the length of the instruction
2478 * to be able to forward the PSW.
2479 */
David Hildenbrand65977322015-11-16 16:17:45 +01002480 rc = read_guest_instr(vcpu, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002481 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002482 if (rc < 0) {
2483 return rc;
2484 } else if (rc) {
2485 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2486 * Forward by arbitrary ilc, injection will take care of
2487 * nullification if necessary.
2488 */
2489 pgm_info = vcpu->arch.pgm;
2490 ilen = 4;
2491 }
David Hildenbrand56317922016-01-12 17:37:58 +01002492 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2493 kvm_s390_forward_psw(vcpu, ilen);
2494 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002495}
2496
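/*
 * Work done after each SIE exit: restore the sync regs, dispatch intercepts,
 * and turn host faults into pfault handling or guest addressing exceptions.
 */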
Thomas Huth3fb4c402013-09-12 10:33:43 +02002497static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2498{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002499 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2500 vcpu->arch.sie_block->icptcode);
2501 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2502
David Hildenbrand27291e22014-01-23 12:26:52 +01002503 if (guestdbg_enabled(vcpu))
2504 kvm_s390_restore_guest_per_regs(vcpu);
2505
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002506 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2507 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002508
2509 if (vcpu->arch.sie_block->icptcode > 0) {
2510 int rc = kvm_handle_sie_intercept(vcpu);
2511
2512 if (rc != -EOPNOTSUPP)
2513 return rc;
2514 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2515 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2516 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2517 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2518 return -EREMOTE;
2519 } else if (exit_reason != -EFAULT) {
2520 vcpu->stat.exit_null++;
2521 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002522 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2523 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2524 vcpu->run->s390_ucontrol.trans_exc_code =
2525 current->thread.gmap_addr;
2526 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002527 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002528 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002529 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002530 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002531 if (kvm_arch_setup_async_pf(vcpu))
2532 return 0;
2533 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002534 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002535 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002536}
2537
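/*
 * Main run loop: keep re-entering SIE until the exit has to be handled
 * elsewhere (rc != 0), a signal is pending, or guest debugging requests
 * an exit to userspace.
 */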
2538static int __vcpu_run(struct kvm_vcpu *vcpu)
2539{
2540 int rc, exit_reason;
2541
Thomas Huth800c1062013-09-12 10:33:45 +02002542 /*
2543 * We try to hold kvm->srcu during most of vcpu_run (except when
2544 * running the guest), so that memslots (and other stuff) are protected
2545 */
2546 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2547
Thomas Hutha76ccff2013-09-12 10:33:44 +02002548 do {
2549 rc = vcpu_pre_run(vcpu);
2550 if (rc)
2551 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002552
Thomas Huth800c1062013-09-12 10:33:45 +02002553 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002554 /*
2555 * As PF_VCPU will be used in the fault handler, there must be
2556 * no uaccess between guest_enter and guest_exit.
2557 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002558 local_irq_disable();
2559 __kvm_guest_enter();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002560 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002561 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002562 exit_reason = sie64a(vcpu->arch.sie_block,
2563 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002564 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002565 __enable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002566 __kvm_guest_exit();
2567 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002568 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002569
Thomas Hutha76ccff2013-09-12 10:33:44 +02002570 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002571 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002572
Thomas Huth800c1062013-09-12 10:33:45 +02002573 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002574 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002575}
2576
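/*
 * Load the PSW and any register state that userspace flagged as dirty in
 * kvm_run (prefix, control registers, timers, pfault parameters) into the
 * vcpu/SIE control block before entering the guest.
 */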
David Hildenbrandb028ee32014-07-17 10:47:43 +02002577static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2578{
2579 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2580 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2581 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2582 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2583 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2584 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002585 /* some control register changes require a tlb flush */
2586 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002587 }
2588 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002589 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002590 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2591 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2592 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2593 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2594 }
2595 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2596 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2597 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2598 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002599 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2600 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002601 }
2602 kvm_run->kvm_dirty_regs = 0;
2603}
2604
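/* Mirror the current vcpu/SIE register state back into kvm_run for userspace. */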
2605static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2606{
2607 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2608 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2609 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2610 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01002611 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002612 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2613 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2614 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2615 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2616 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2617 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2618 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2619}
2620
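/*
 * Entry point for the KVM_RUN ioctl: sync registers from kvm_run, run the
 * vcpu, and store the resulting state and exit reason back for userspace.
 */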
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002621int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2622{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002623 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002624 sigset_t sigsaved;
2625
David Hildenbrand27291e22014-01-23 12:26:52 +01002626 if (guestdbg_exit_pending(vcpu)) {
2627 kvm_s390_prepare_debug_exit(vcpu);
2628 return 0;
2629 }
2630
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002631 if (vcpu->sigset_active)
2632 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2633
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002634 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2635 kvm_s390_vcpu_start(vcpu);
2636 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002637 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002638 vcpu->vcpu_id);
2639 return -EINVAL;
2640 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002641
David Hildenbrandb028ee32014-07-17 10:47:43 +02002642 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002643 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002644
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002645 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002646 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002647
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002648 if (signal_pending(current) && !rc) {
2649 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002650 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002651 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002652
David Hildenbrand27291e22014-01-23 12:26:52 +01002653 if (guestdbg_exit_pending(vcpu) && !rc) {
2654 kvm_s390_prepare_debug_exit(vcpu);
2655 rc = 0;
2656 }
2657
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002658 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002659 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002660 rc = 0;
2661 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002662
David Hildenbranddb0758b2016-02-15 09:42:25 +01002663 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002664 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002665
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002666 if (vcpu->sigset_active)
2667 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2668
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002669 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002670 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002671}
2672
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002673/*
2674 * store status at address
2675 * we have two special cases:
2676 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2677 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2678 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002679int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002680{
Carsten Otte092670c2011-07-24 10:48:22 +02002681 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002682 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02002683 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01002684 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002685 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002686
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002687 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002688 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2689 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002690 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002691 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002692 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2693 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002694 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002695 gpa = px;
2696 } else
2697 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002698
2699 /* manually convert vector registers if necessary */
2700 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01002701 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002702 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2703 fprs, 128);
2704 } else {
2705 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002706 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002707 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002708 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002709 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002710 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002711 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002712 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02002713 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002714 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002715 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002716 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002717 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01002718 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002719 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01002720 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002721 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002722 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002723 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002724 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002725 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002726 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002727 &vcpu->arch.sie_block->gcr, 128);
2728 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002729}
2730
Thomas Huthe8798922013-11-06 15:46:33 +01002731int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2732{
2733 /*
2734 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2735 * copying in vcpu load/put. Let's update our copies before we save
2736 * them into the save area.
2737 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002738 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002739 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01002740 save_access_regs(vcpu->run->s.regs.acrs);
2741
2742 return kvm_s390_store_status_unloaded(vcpu, addr);
2743}
2744
Eric Farmanbc17de72014-04-14 16:01:09 -04002745/*
2746 * store additional status at address
2747 */
2748int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2749 unsigned long gpa)
2750{
2751 /* Only bits 0-53 are used for address formation */
2752 if (!(gpa & ~0x3ff))
2753 return 0;
2754
2755 return write_guest_abs(vcpu, gpa & ~0x3ff,
2756 (void *)&vcpu->run->s.regs.vrs, 512);
2757}
2758
2759int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2760{
2761 if (!test_kvm_facility(vcpu->kvm, 129))
2762 return 0;
2763
2764 /*
2765 * The guest VXRS are in the host VXRS due to the lazy
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002766 * copying in vcpu load/put. We can simply call save_fpu_regs()
2767 * to save the current register state because we are in the
2768 * middle of a load/put cycle.
2769 *
2770 * Let's update our copies before we save them into the save area.
Eric Farmanbc17de72014-04-14 16:01:09 -04002771 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002772 save_fpu_regs();
Eric Farmanbc17de72014-04-14 16:01:09 -04002773
2774 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2775}
2776
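/*
 * IBS is only used to speed up a guest while a single vcpu is running;
 * the helpers below request enabling/disabling it via sync requests that
 * the target vcpu processes on its next entry.
 */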
David Hildenbrand8ad35752014-03-14 11:00:21 +01002777static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2778{
2779 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002780 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002781}
2782
2783static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2784{
2785 unsigned int i;
2786 struct kvm_vcpu *vcpu;
2787
2788 kvm_for_each_vcpu(i, vcpu, kvm) {
2789 __disable_ibs_on_vcpu(vcpu);
2790 }
2791}
2792
2793static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2794{
David Hildenbrand09a400e2016-04-04 15:57:08 +02002795 if (!sclp.has_ibs)
2796 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002797 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002798 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002799}
2800
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002801void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2802{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002803 int i, online_vcpus, started_vcpus = 0;
2804
2805 if (!is_vcpu_stopped(vcpu))
2806 return;
2807
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002808 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002809 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002810 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002811 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2812
2813 for (i = 0; i < online_vcpus; i++) {
2814 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2815 started_vcpus++;
2816 }
2817
2818 if (started_vcpus == 0) {
2819 /* we're the only active VCPU -> speed it up */
2820 __enable_ibs_on_vcpu(vcpu);
2821 } else if (started_vcpus == 1) {
2822 /*
2823 * As we are starting a second VCPU, we have to disable
2824 * the IBS facility on all VCPUs to remove potentially
2825 * outstanding ENABLE requests.
2826 */
2827 __disable_ibs_on_all_vcpus(vcpu->kvm);
2828 }
2829
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002830 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002831 /*
2832 * Another VCPU might have used IBS while we were offline.
2833 * Let's play safe and flush the VCPU at startup.
2834 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002835 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002836 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002837 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002838}
2839
2840void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2841{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002842 int i, online_vcpus, started_vcpus = 0;
2843 struct kvm_vcpu *started_vcpu = NULL;
2844
2845 if (is_vcpu_stopped(vcpu))
2846 return;
2847
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002848 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002849 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002850 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002851 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2852
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002853 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002854 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002855
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002856 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002857 __disable_ibs_on_vcpu(vcpu);
2858
2859 for (i = 0; i < online_vcpus; i++) {
2860 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2861 started_vcpus++;
2862 started_vcpu = vcpu->kvm->vcpus[i];
2863 }
2864 }
2865
2866 if (started_vcpus == 1) {
2867 /*
2868 * As we only have one VCPU left, we want to enable the
2869 * IBS facility for that VCPU to speed it up.
2870 */
2871 __enable_ibs_on_vcpu(started_vcpu);
2872 }
2873
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002874 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002875 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002876}
2877
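/* Per-vcpu KVM_ENABLE_CAP: only KVM_CAP_S390_CSS_SUPPORT is handled here. */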
Cornelia Huckd6712df2012-12-20 15:32:11 +01002878static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2879 struct kvm_enable_cap *cap)
2880{
2881 int r;
2882
2883 if (cap->flags)
2884 return -EINVAL;
2885
2886 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002887 case KVM_CAP_S390_CSS_SUPPORT:
2888 if (!vcpu->kvm->arch.css_support) {
2889 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002890 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002891 trace_kvm_s390_enable_css(vcpu->kvm);
2892 }
2893 r = 0;
2894 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002895 default:
2896 r = -EINVAL;
2897 break;
2898 }
2899 return r;
2900}
2901
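/*
 * Handler for the KVM_S390_MEM_OP vcpu ioctl: read or write guest logical
 * memory through a temporary buffer, or only check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.
 */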
Thomas Huth41408c22015-02-06 15:01:21 +01002902static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2903 struct kvm_s390_mem_op *mop)
2904{
2905 void __user *uaddr = (void __user *)mop->buf;
2906 void *tmpbuf = NULL;
2907 int r, srcu_idx;
2908 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2909 | KVM_S390_MEMOP_F_CHECK_ONLY;
2910
2911 if (mop->flags & ~supported_flags)
2912 return -EINVAL;
2913
2914 if (mop->size > MEM_OP_MAX_SIZE)
2915 return -E2BIG;
2916
2917 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2918 tmpbuf = vmalloc(mop->size);
2919 if (!tmpbuf)
2920 return -ENOMEM;
2921 }
2922
2923 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2924
2925 switch (mop->op) {
2926 case KVM_S390_MEMOP_LOGICAL_READ:
2927 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002928 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2929 mop->size, GACC_FETCH);
Thomas Huth41408c22015-02-06 15:01:21 +01002930 break;
2931 }
2932 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2933 if (r == 0) {
2934 if (copy_to_user(uaddr, tmpbuf, mop->size))
2935 r = -EFAULT;
2936 }
2937 break;
2938 case KVM_S390_MEMOP_LOGICAL_WRITE:
2939 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002940 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2941 mop->size, GACC_STORE);
Thomas Huth41408c22015-02-06 15:01:21 +01002942 break;
2943 }
2944 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2945 r = -EFAULT;
2946 break;
2947 }
2948 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2949 break;
2950 default:
2951 r = -EINVAL;
2952 }
2953
2954 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2955
2956 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2957 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2958
2959 vfree(tmpbuf);
2960 return r;
2961}
2962
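/*
 * Dispatcher for the remaining vcpu ioctls: interrupt injection, store
 * status, initial reset, one-reg access, ucontrol mappings, memory
 * operations and irq state save/restore.
 */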
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002963long kvm_arch_vcpu_ioctl(struct file *filp,
2964 unsigned int ioctl, unsigned long arg)
2965{
2966 struct kvm_vcpu *vcpu = filp->private_data;
2967 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02002968 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03002969 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002970
Avi Kivity93736622010-05-13 12:35:17 +03002971 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01002972 case KVM_S390_IRQ: {
2973 struct kvm_s390_irq s390irq;
2974
2975 r = -EFAULT;
2976 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2977 break;
2978 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2979 break;
2980 }
Avi Kivity93736622010-05-13 12:35:17 +03002981 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002982 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02002983 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002984
Avi Kivity93736622010-05-13 12:35:17 +03002985 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002986 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03002987 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02002988 if (s390int_to_s390irq(&s390int, &s390irq))
2989 return -EINVAL;
2990 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03002991 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002992 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002993 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02002994 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002995 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02002996 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03002997 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002998 case KVM_S390_SET_INITIAL_PSW: {
2999 psw_t psw;
3000
Avi Kivitybc923cc2010-05-13 12:21:46 +03003001 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003002 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003003 break;
3004 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3005 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003006 }
3007 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003008 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3009 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003010 case KVM_SET_ONE_REG:
3011 case KVM_GET_ONE_REG: {
3012 struct kvm_one_reg reg;
3013 r = -EFAULT;
3014 if (copy_from_user(&reg, argp, sizeof(reg)))
3015 break;
3016 if (ioctl == KVM_SET_ONE_REG)
3017 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3018 else
3019 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3020 break;
3021 }
Carsten Otte27e03932012-01-04 10:25:21 +01003022#ifdef CONFIG_KVM_S390_UCONTROL
3023 case KVM_S390_UCAS_MAP: {
3024 struct kvm_s390_ucas_mapping ucasmap;
3025
3026 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3027 r = -EFAULT;
3028 break;
3029 }
3030
3031 if (!kvm_is_ucontrol(vcpu->kvm)) {
3032 r = -EINVAL;
3033 break;
3034 }
3035
3036 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3037 ucasmap.vcpu_addr, ucasmap.length);
3038 break;
3039 }
3040 case KVM_S390_UCAS_UNMAP: {
3041 struct kvm_s390_ucas_mapping ucasmap;
3042
3043 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3044 r = -EFAULT;
3045 break;
3046 }
3047
3048 if (!kvm_is_ucontrol(vcpu->kvm)) {
3049 r = -EINVAL;
3050 break;
3051 }
3052
3053 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3054 ucasmap.length);
3055 break;
3056 }
3057#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003058 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003059 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003060 break;
3061 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003062 case KVM_ENABLE_CAP:
3063 {
3064 struct kvm_enable_cap cap;
3065 r = -EFAULT;
3066 if (copy_from_user(&cap, argp, sizeof(cap)))
3067 break;
3068 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3069 break;
3070 }
Thomas Huth41408c22015-02-06 15:01:21 +01003071 case KVM_S390_MEM_OP: {
3072 struct kvm_s390_mem_op mem_op;
3073
3074 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3075 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3076 else
3077 r = -EFAULT;
3078 break;
3079 }
Jens Freimann816c7662014-11-24 17:13:46 +01003080 case KVM_S390_SET_IRQ_STATE: {
3081 struct kvm_s390_irq_state irq_state;
3082
3083 r = -EFAULT;
3084 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3085 break;
3086 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3087 irq_state.len == 0 ||
3088 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3089 r = -EINVAL;
3090 break;
3091 }
3092 r = kvm_s390_set_irq_state(vcpu,
3093 (void __user *) irq_state.buf,
3094 irq_state.len);
3095 break;
3096 }
3097 case KVM_S390_GET_IRQ_STATE: {
3098 struct kvm_s390_irq_state irq_state;
3099
3100 r = -EFAULT;
3101 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3102 break;
3103 if (irq_state.len == 0) {
3104 r = -EINVAL;
3105 break;
3106 }
3107 r = kvm_s390_get_irq_state(vcpu,
3108 (__u8 __user *) irq_state.buf,
3109 irq_state.len);
3110 break;
3111 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003112 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003113 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003114 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003115 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003116}
3117
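/*
 * Fault handler for mmap() on the vcpu fd: ucontrol VMs expose the SIE
 * control block at KVM_S390_SIE_PAGE_OFFSET, everything else gets SIGBUS.
 */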
Carsten Otte5b1c1492012-01-04 10:25:23 +01003118int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3119{
3120#ifdef CONFIG_KVM_S390_UCONTROL
3121 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3122 && (kvm_is_ucontrol(vcpu->kvm))) {
3123 vmf->page = virt_to_page(vcpu->arch.sie_block);
3124 get_page(vmf->page);
3125 return 0;
3126 }
3127#endif
3128 return VM_FAULT_SIGBUS;
3129}
3130
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303131int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3132 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003133{
3134 return 0;
3135}
3136
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003137/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003138int kvm_arch_prepare_memory_region(struct kvm *kvm,
3139 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003140 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003141 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003142{
Nick Wangdd2887e2013-03-25 17:22:57 +01003143 /* A few sanity checks. Memory slots have to start and end at a
3144 segment boundary (1MB). The memory in userland may be fragmented
3145 into various different vmas. It is okay to mmap() and munmap()
3146 memory in this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003147
Carsten Otte598841c2011-07-24 10:48:21 +02003148 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003149 return -EINVAL;
3150
Carsten Otte598841c2011-07-24 10:48:21 +02003151 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003152 return -EINVAL;
3153
Dominik Dingela3a92c32014-12-01 17:24:42 +01003154 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3155 return -EINVAL;
3156
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003157 return 0;
3158}
3159
3160void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003161 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003162 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003163 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003164 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003165{
Carsten Ottef7850c92011-07-24 10:48:23 +02003166 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003167
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003168 /* If the basics of the memslot do not change, we do not want
3169 * to update the gmap. Every update causes several unnecessary
3170 * segment translation exceptions. This is usually handled just
3171 * fine by the normal fault handler + gmap, but it will also
3172 * cause faults on the prefix page of running guest CPUs.
3173 */
3174 if (old->userspace_addr == mem->userspace_addr &&
3175 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3176 old->npages * PAGE_SIZE == mem->memory_size)
3177 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003178
3179 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3180 mem->guest_phys_addr, mem->memory_size);
3181 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003182 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003183 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003184}
3185
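/*
 * Mask for facility-list doubleword i, derived from sclp.hmfai: keep only
 * those facility bits that may be passed through to guests.
 */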
Alexander Yarygin60a37702016-04-01 15:38:57 +03003186static inline unsigned long nonhyp_mask(int i)
3187{
3188 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3189
3190 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3191}
3192
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003193void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3194{
3195 vcpu->valid_wakeup = false;
3196}
3197
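/*
 * Module init: refuse to load if the SIE facility (sief2) is unavailable,
 * then fold the host facility list (filtered by nonhyp_mask) into
 * kvm_s390_fac_list_mask before registering with KVM.
 */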
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003198static int __init kvm_s390_init(void)
3199{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003200 int i;
3201
David Hildenbrand07197fd2015-01-30 16:01:38 +01003202 if (!sclp.has_sief2) {
3203 pr_info("SIE not available\n");
3204 return -ENODEV;
3205 }
3206
Alexander Yarygin60a37702016-04-01 15:38:57 +03003207 for (i = 0; i < 16; i++)
3208 kvm_s390_fac_list_mask[i] |=
3209 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3210
Michael Mueller9d8d5782015-02-02 15:42:51 +01003211 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003212}
3213
3214static void __exit kvm_s390_exit(void)
3215{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003216 kvm_exit();
3217}
3218
3219module_init(kvm_s390_init);
3220module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003221
3222/*
3223 * Enable autoloading of the kvm module.
3224 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3225 * since x86 takes a different approach.
3226 */
3227#include <linux/miscdevice.h>
3228MODULE_ALIAS_MISCDEV(KVM_MINOR);
3229MODULE_ALIAS("devname:kvm");