blob: 2b5c14da32273aa8ff5c2022459ece650e77c318 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
David Hildenbrand15c97052015-03-19 17:36:43 +010029#include <linux/bitmap.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010030#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010031#include <asm/lowcore.h>
Fan Zhangfdf03652015-05-13 10:58:41 +020032#include <asm/etr.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010033#include <asm/pgtable.h>
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +010034#include <asm/gmap.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010035#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010036#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020037#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020038#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010039#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010040#include "gaccess.h"
41
David Hildenbrandea2cdd22015-05-20 13:24:02 +020042#define KMSG_COMPONENT "kvm-s390"
43#undef pr_fmt
44#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
45
Cornelia Huck5786fff2012-07-23 17:20:29 +020046#define CREATE_TRACE_POINTS
47#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020048#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020049
Thomas Huth41408c282015-02-06 15:01:21 +010050#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010051#define LOCAL_IRQS 32
52#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
53 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010054
Heiko Carstensb0c632d2008-03-25 18:47:20 +010055#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
56
/*
 * Statistics exported via debugfs: each entry maps a debugfs file name to
 * a per-vcpu counter in struct kvm_vcpu->stat (offset via VCPU_STAT).
 * The list is NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
123
/*
 * Upper facilities limit for kvm: facility bits not set in this mask are
 * never reported to guests, regardless of what the host supports.
 * Only the first two 64-bit words carry non-zero bits here.
 */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100129
/*
 * Number of 64-bit words in kvm_s390_fac_list_mask; compile-time checked
 * against the architectural facility mask size.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
135
David Hildenbrand15c97052015-03-19 17:36:43 +0100136/* available cpu features supported by kvm */
137static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
138
Michael Mueller9d8d5782015-02-02 15:42:51 +0100139static struct gmap_notifier gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200140debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100141
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
148
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200149static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
150
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;	/* host TOD clock delta from the notifier */

	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* shift every VM's epoch so the guest-visible TOD is unchanged */
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			/*
			 * Keep the CPU-timer accounting consistent: its start
			 * stamp is based on the (now shifted) clock.
			 */
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

/* Hooked into the s390 epoch-delta notifier chain during hardware setup. */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
179
/*
 * Register the gmap IPTE invalidation notifier and the TOD epoch-delta
 * notifier. Both are unregistered again in kvm_arch_hardware_unsetup().
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
188
/* Undo kvm_arch_hardware_setup(): remove both notifiers. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
195
/* Mark CPU feature bit @nr as available to guests. */
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

/* Probe host capabilities and fill the available-cpu-feature bitmap. */
static void kvm_s390_cpu_feat_init(void)
{
	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}
206
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100207int kvm_arch_init(void *opaque)
208{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200209 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
210 if (!kvm_s390_dbf)
211 return -ENOMEM;
212
213 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
214 debug_unregister(kvm_s390_dbf);
215 return -ENOMEM;
216 }
217
David Hildenbrand22be5a12016-01-21 13:22:54 +0100218 kvm_s390_cpu_feat_init();
219
Cornelia Huck84877d92014-09-02 10:27:35 +0100220 /* Register floating interrupt controller interface. */
221 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100222}
223
/* Tear down what kvm_arch_init() created: the debug area. */
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
228
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100229/* Section: device related */
230long kvm_arch_dev_ioctl(struct file *filp,
231 unsigned int ioctl, unsigned long arg)
232{
233 if (ioctl == KVM_S390_ENABLE_SIE)
234 return s390_enable_sie();
235 return -EINVAL;
236}
237
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this VM/arch
 * supports. Returns 1 (or a capability-specific positive value) when
 * supported, 0 otherwise.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* unconditionally supported capabilities */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* value is the maximum transfer size for KVM_S390_MEM_OP */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		/* vcpu limit depends on the SCA format the hardware offers */
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		/* runtime instrumentation requires facility 64 */
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
292
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400293static void kvm_s390_sync_dirty_log(struct kvm *kvm,
294 struct kvm_memory_slot *memslot)
295{
296 gfn_t cur_gfn, last_gfn;
297 unsigned long address;
298 struct gmap *gmap = kvm->arch.gmap;
299
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400300 /* Loop over all guest pages */
301 last_gfn = memslot->base_gfn + memslot->npages;
302 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
303 address = gfn_to_hva_memslot(memslot, cur_gfn);
304
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +0100305 if (test_and_clear_guest_dirty(gmap->mm, address))
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400306 mark_page_dirty(kvm, cur_gfn);
Christian Borntraeger1763f8d2016-02-03 11:12:34 +0100307 if (fatal_signal_pending(current))
308 return;
Christian Borntraeger70c88a02016-02-02 15:15:56 +0100309 cond_resched();
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400310 }
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400311}
312
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100313/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200314static void sca_del_vcpu(struct kvm_vcpu *vcpu);
315
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100316/*
317 * Get (and clear) the dirty memory log for a memory slot.
318 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Synchronizes hardware/gmap dirty state into the memslot bitmap first,
 * copies the log to userspace via kvm_get_dirty_log(), then clears the
 * bitmap. Protected by kvm->slots_lock.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty bits from the gmap into the memslot bitmap */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
355
/*
 * KVM_ENABLE_CAP (VM scope): opt this VM into optional behavior.
 * Facility-changing capabilities (vector registers, runtime
 * instrumentation) are only allowed before any vcpu is created and are
 * serialized via kvm->lock.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			/* too late: vcpus already copied the facility lists */
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			/* expose facility 129 (vector) to the guest model */
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			/* expose facility 64 (runtime instrumentation) */
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
413
/*
 * KVM_GET_DEVICE_ATTR, memory-control group: currently only the guest
 * memory limit can be queried; the value is copied to userspace.
 */
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
432
/*
 * KVM_SET_DEVICE_ATTR, memory-control group: enable CMMA, reset CMMA
 * state, or change the guest memory limit. CMMA enabling and limit
 * changes are only permitted while no vcpu exists (checked under
 * kvm->lock).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot changes */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* may never raise the limit above an already set one */
		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the new gmap; the old one is freed */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
513
Tony Krowiaka374e892014-09-03 10:13:53 +0200514static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
515
/*
 * KVM_SET_DEVICE_ATTR, crypto group: enable/disable AES and DEA key
 * wrapping. Requires facility 76 (MSA extension 3). On enable a fresh
 * random wrapping key mask is generated; on disable it is zeroed. All
 * vcpus are then reconfigured and kicked out of SIE so they pick up the
 * new crypto control block settings.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* force every vcpu to reload its crypto setup */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
564
Jason J. Herne72f25022014-11-25 09:46:02 -0500565static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
566{
567 u8 gtod_high;
568
569 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
570 sizeof(gtod_high)))
571 return -EFAULT;
572
573 if (gtod_high != 0)
574 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200575 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500576
577 return 0;
578}
579
/*
 * Set the low (base) part of the guest TOD clock from the user-supplied
 * 64-bit value; kvm_s390_set_tod_clock() adjusts the epoch accordingly.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
591
592static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
593{
594 int ret;
595
596 if (attr->flags)
597 return -EINVAL;
598
599 switch (attr->attr) {
600 case KVM_S390_VM_TOD_HIGH:
601 ret = kvm_s390_set_tod_high(kvm, attr);
602 break;
603 case KVM_S390_VM_TOD_LOW:
604 ret = kvm_s390_set_tod_low(kvm, attr);
605 break;
606 default:
607 ret = -ENXIO;
608 break;
609 }
610 return ret;
611}
612
/*
 * Query the high (epoch extension) part of the guest TOD clock; always
 * reports 0, matching what kvm_s390_set_tod_high() accepts.
 */
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}
624
/*
 * Query the low (base) part of the guest TOD clock and copy the 64-bit
 * value to userspace.
 */
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
636
637static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
638{
639 int ret;
640
641 if (attr->flags)
642 return -EINVAL;
643
644 switch (attr->attr) {
645 case KVM_S390_VM_TOD_HIGH:
646 ret = kvm_s390_get_tod_high(kvm, attr);
647 break;
648 case KVM_S390_VM_TOD_LOW:
649 ret = kvm_s390_get_tod_low(kvm, attr);
650 break;
651 default:
652 ret = -ENXIO;
653 break;
654 }
655 return ret;
656}
657
/*
 * Set the guest CPU model (cpuid, ibc, facility list) from userspace.
 * Only allowed before the first vcpu is created; the requested ibc is
 * clamped to the [lowest_ibc, unblocked_ibc] range reported by SCLP.
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		/* lowest_ibc == 0 means no ibc support; keep the default */
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
696
/*
 * Set the guest CPU feature bitmap from user space.
 *
 * The requested features must be a subset of what the host can offer
 * (kvm_s390_available_cpu_feat), and the change is only permitted before
 * the first vcpu is created.
 */
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		/* no vcpus yet - safe to install the new feature set */
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
719
Michael Mueller658b6ed2015-02-02 15:49:35 +0100720static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
721{
722 int ret = -ENXIO;
723
724 switch (attr->attr) {
725 case KVM_S390_VM_CPU_PROCESSOR:
726 ret = kvm_s390_set_processor(kvm, attr);
727 break;
David Hildenbrand15c97052015-03-19 17:36:43 +0100728 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
729 ret = kvm_s390_set_processor_feat(kvm, attr);
730 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100731 }
732 return ret;
733}
734
/*
 * Copy the currently configured guest processor attributes (CPUID, IBC,
 * facility list) to user space.
 *
 * Uses a kzalloc'd bounce buffer so copy_to_user never sees kernel
 * structure padding.
 */
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
755
/*
 * Report the host machine's CPU model data to user space: the real CPUID,
 * the raw SCLP IBC value, KVM's facility mask and the host facility list
 * (STFLE).  Unlike kvm_s390_get_processor() this describes the machine,
 * not the configured guest model.
 */
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	/* raw host facilities as reported by STFLE */
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
778
David Hildenbrand15c97052015-03-19 17:36:43 +0100779static int kvm_s390_get_processor_feat(struct kvm *kvm,
780 struct kvm_device_attr *attr)
781{
782 struct kvm_s390_vm_cpu_feat data;
783
784 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
785 KVM_S390_VM_CPU_FEAT_NR_BITS);
786 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
787 return -EFAULT;
788 return 0;
789}
790
791static int kvm_s390_get_machine_feat(struct kvm *kvm,
792 struct kvm_device_attr *attr)
793{
794 struct kvm_s390_vm_cpu_feat data;
795
796 bitmap_copy((unsigned long *) data.feat,
797 kvm_s390_available_cpu_feat,
798 KVM_S390_VM_CPU_FEAT_NR_BITS);
799 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
800 return -EFAULT;
801 return 0;
802}
803
Michael Mueller658b6ed2015-02-02 15:49:35 +0100804static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
805{
806 int ret = -ENXIO;
807
808 switch (attr->attr) {
809 case KVM_S390_VM_CPU_PROCESSOR:
810 ret = kvm_s390_get_processor(kvm, attr);
811 break;
812 case KVM_S390_VM_CPU_MACHINE:
813 ret = kvm_s390_get_machine(kvm, attr);
814 break;
David Hildenbrand15c97052015-03-19 17:36:43 +0100815 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
816 ret = kvm_s390_get_processor_feat(kvm, attr);
817 break;
818 case KVM_S390_VM_CPU_MACHINE_FEAT:
819 ret = kvm_s390_get_machine_feat(kvm, attr);
820 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100821 }
822 return ret;
823}
824
Dominik Dingelf2061652014-04-09 13:13:00 +0200825static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
826{
827 int ret;
828
829 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200830 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100831 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200832 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500833 case KVM_S390_VM_TOD:
834 ret = kvm_s390_set_tod(kvm, attr);
835 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100836 case KVM_S390_VM_CPU_MODEL:
837 ret = kvm_s390_set_cpu_model(kvm, attr);
838 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200839 case KVM_S390_VM_CRYPTO:
840 ret = kvm_s390_vm_set_crypto(kvm, attr);
841 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200842 default:
843 ret = -ENXIO;
844 break;
845 }
846
847 return ret;
848}
849
850static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
851{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100852 int ret;
853
854 switch (attr->group) {
855 case KVM_S390_VM_MEM_CTRL:
856 ret = kvm_s390_get_mem_control(kvm, attr);
857 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500858 case KVM_S390_VM_TOD:
859 ret = kvm_s390_get_tod(kvm, attr);
860 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100861 case KVM_S390_VM_CPU_MODEL:
862 ret = kvm_s390_get_cpu_model(kvm, attr);
863 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100864 default:
865 ret = -ENXIO;
866 break;
867 }
868
869 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200870}
871
/*
 * KVM_HAS_DEVICE_ATTR handler: report (0) whether a given group/attribute
 * pair is supported by this kernel, without performing the operation.
 * -ENXIO means the group or attribute is unknown.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
933
/*
 * KVM_S390_GET_SKEYS: read the storage keys for a range of guest frames
 * into a user-space buffer.
 *
 * Returns 0 on success, KVM_S390_GET_SKEYS_NONE if the guest does not use
 * storage keys, -EINVAL for bad flags/count, -EFAULT for bad guest frames
 * or user copy failures, -ENOMEM if no bounce buffer could be allocated.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large counts */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/*
		 * NOTE(review): relies on get_guest_storage_key() encoding
		 * errors as "negative" unsigned longs detectable via
		 * IS_ERR_VALUE() - confirm against its definition.
		 */
		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	/* kvfree() handles both kmalloc'd and vmalloc'd buffers */
	kvfree(keys);
	return r;
}
982
/*
 * KVM_S390_SET_SKEYS: write user-supplied storage keys for a range of
 * guest frames, enabling storage key handling for the guest on first use.
 *
 * Returns 0 on success, -EINVAL for bad flags/count or a key with the
 * reserved low bit set, -EFAULT for bad guest frames or copy failures,
 * -ENOMEM if no bounce buffer could be allocated.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for large counts */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
1037
/*
 * Architecture entry point for VM-scoped ioctls.  Copies the ioctl
 * argument from user space where needed and dispatches to the s390
 * specific handlers.  Returns -ENOTTY for unknown ioctls so generic
 * KVM code / user space can detect unsupported requests.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid if the flic-based irqchip was enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
1122
/*
 * Execute PQAP(QCI) to query the Adjunct Processor (crypto) configuration
 * into the 128-byte buffer @config.  Returns the instruction's condition
 * code (0 on success).
 *
 * The buffer is zeroed first so callers see defined contents even if the
 * instruction faults; the EX_TABLE entry makes a faulting PQAP resume at
 * label 1 with cc still 0... NOTE(review): a faulted execution thus
 * returns cc == 0 with an all-zero buffer - confirm this is intended.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* function code 0x04 = QCI (query crypto information) in reg 0 */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n" /* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
1144
/*
 * Check whether the AP extended addressing (APXA) facility is installed.
 *
 * Requires facility 12 (AP query configuration); returns non-zero if the
 * QCI response flags APXA (bit 0x40 of byte 0), 0 otherwise or on error.
 */
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
1161
/*
 * Initialize the crypto control block designation (crycbd): the address
 * of the crycb with the format bits set according to whether APXA is
 * available (format 2) or not (format 1).
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
1171
/*
 * Build the default guest CPUID from the host CPUID, with the version
 * field forced to 0xff.  Returned as a raw u64 for storage in the
 * CPU model.
 */
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}
1180
/*
 * Set up the VM's crypto control block if the MSA extension (facility 76)
 * is available: point crycb into sie_page2, choose the crycb format, and
 * enable AES/DEA protected-key wrapping with freshly generated random
 * wrapping key masks.
 */
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
1197
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001198static void sca_dispose(struct kvm *kvm)
1199{
1200 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001201 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001202 else
1203 free_page((unsigned long)(kvm->arch.sca));
1204 kvm->arch.sca = NULL;
1205}
1206
/*
 * Architecture-specific VM creation.
 *
 * @type selects normal vs. user-controlled (ucontrol) VMs; ucontrol
 * requires CAP_SYS_ADMIN.  Allocates the basic SCA, debug feature,
 * sie_page2 (facility lists + crycb), initializes the CPU model,
 * crypto, floating interrupt state and - for normal VMs - the gmap
 * guest address space.  On failure all partially set up resources
 * are released via out_err.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger SCAs of different VMs within the page by a rotating
	 * 16-byte offset (presumably to spread cache line usage - the
	 * bsca_block is small enough that several fit in one page).
	 */
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/* must be below 2 GB (GFP_DMA): referenced by the SIE block */
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	/* facility 74 (STHYI) is always provided by KVM itself */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs manage their gmaps per vcpu */
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	/* all of these tolerate partially initialized / NULL state */
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
1319
/*
 * Tear down a vcpu: clear pending local interrupts and async page
 * faults, detach it from the SCA (normal VMs) or free its private gmap
 * (ucontrol VMs), release CMMA state and the SIE control block, then
 * free the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	/* ucontrol vcpus own a per-vcpu gmap (see __kvm_ucontrol_vcpu_init) */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
1339
/*
 * Destroy all vcpus of a VM and reset the vcpu bookkeeping (vcpus array
 * and online count) under kvm->lock.  Called during VM destruction.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
1355
/*
 * Architecture-specific VM destruction: release vcpus, SCA, debug
 * feature, sie_page2, the gmap (normal VMs only), irqfd adapters and
 * any remaining floating interrupts.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
1368
1369/* Section: vcpu related */
/*
 * For user-controlled VMs each vcpu gets its own gmap covering the whole
 * address range (-1UL limit).  Freed again in kvm_arch_vcpu_destroy().
 */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
1379
/*
 * Remove a vcpu from the VM's SCA: clear its bit in the mcn bitmap and
 * zero its SIE block descriptor, for either SCA format.  The read lock
 * suffices because per-vcpu entries are only touched by their owner;
 * the write lock is taken only when the SCA itself is exchanged.
 */
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		/* esca: mcn is an array, bsca: a single word - hence &/no & */
		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
1396
/*
 * Insert a vcpu into the VM's SCA: record its SIE block address in the
 * per-cpu slot, point the vcpu's SIE block at the SCA (scaoh/scaol) and
 * set its bit in the mcn bitmap.  For the extended SCA, ecb2 bit 0x04
 * additionally enables ESCA mode in the SIE block.
 */
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		/* low word keeps the low bits free for the ESCA flag bits */
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
1418
1419/* Basic SCA to Extended SCA data copy routines */
/* Copy one per-cpu SCA entry from basic to extended format. */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}
1426
/*
 * Copy a complete basic SCA into an extended SCA: IPTE control, the mcn
 * bitmap (the bsca's single word becomes word 0 of the esca array) and
 * all basic-format cpu slots.
 */
static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
1436
/*
 * Replace the VM's basic SCA with an extended SCA, so more than
 * KVM_S390_BSCA_CPU_SLOTS vcpus can be created.
 *
 * All vcpus are blocked and the sca_lock is held for writing while the
 * old contents are copied and every vcpu's SIE block is repointed to
 * the new SCA (with the ESCA ecb2 bit set).  Returns 0 on success or
 * -ENOMEM if the new SCA cannot be allocated.
 */
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	/* keep all vcpus out of SIE while the SCA is exchanged */
	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
1474
/*
 * Check whether a vcpu with the given id fits into the VM's SCA,
 * upgrading the basic SCA to the extended format on demand (under
 * kvm->lock).  Returns true/false.
 */
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
1490
/*
 * Architecture-specific vcpu initialization: announce which register
 * sets can be synchronized via the kvm_run area (depending on machine
 * facilities) and, for ucontrol VMs, set up the per-vcpu gmap.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	/* facility 64: runtime-instrumentation controls */
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
1516
/*
 * Start guest cpu-timer accounting by recording the TOD clock at guest
 * entry in cputm_start.  The seqcount write section lets lockless readers
 * in kvm_s390_get_cpu_timer() detect a concurrent update and retry.
 * Needs disabled preemption to protect from TOD sync and vcpu_load/put.
 */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* accounting must not already be running */
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
1525
/*
 * Stop guest cpu-timer accounting: charge the elapsed TOD time since
 * cputm_start against the guest cpu timer (which counts down) and mark
 * accounting as stopped via cputm_start == 0.
 * Needs disabled preemption to protect from TOD sync and vcpu_load/put.
 */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	/* accounting must currently be running */
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}
1535
/*
 * Turn cpu-timer accounting on for this vcpu and immediately start a
 * new accounting period.
 * Needs disabled preemption to protect from TOD sync and vcpu_load/put.
 */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}
1543
/*
 * Close the current accounting period and turn cpu-timer accounting
 * off for this vcpu.
 * Needs disabled preemption to protect from TOD sync and vcpu_load/put.
 */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
1551
/* Preemption-safe wrapper around __enable_cpu_timer_accounting(). */
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
1558
/* Preemption-safe wrapper around __disable_cpu_timer_accounting(). */
static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
1565
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	/*
	 * Restart the accounting period from "now" so that time already
	 * consumed before this call is not charged against the new value.
	 */
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}
1577
/*
 * update and get the cpu timer - can also be called from other VCPU threads
 *
 * Lockless read: sample the raw cpu timer plus the time elapsed in the
 * current accounting period under the cputm seqcount, retrying if a writer
 * (start/stop/set) runs concurrently.
 */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	/* fast path: no accounting in progress, the raw value is current */
	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
1603
/*
 * Called when this vcpu is scheduled onto a host cpu: swap host FPU/vector
 * and access-register state for the guest's, enable the guest address
 * space and resume cpu-timer accounting.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	/* point the lazy FPU machinery at the guest register save area */
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	/* idle vcpus keep accounting stopped; see halt/wakeup handling */
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
1628
/*
 * Counterpart of kvm_arch_vcpu_load(): stop cpu-timer accounting, disable
 * the guest address space and restore host FPU/vector and access-register
 * state.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1648
/*
 * Reset a vcpu to its architected initial state (PSW, prefix, timers,
 * control registers) and clear KVM-internal per-vcpu state (async page
 * faults, pending local interrupts).
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* CR0/CR14 reset values per initial cpu reset (see comment above) */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* unless userspace manages the cpu state itself, enter stopped state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1672
/*
 * Finish vcpu creation after the generic KVM code is done: inherit the
 * VM-wide TOD epoch and hook the vcpu up to the VM's gmap and SCA
 * (ucontrol VMs manage their own gmap, so they are skipped).
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	/* preemption disabled to protect the epoch against TOD clock sync */
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}

}
1686
Tony Krowiak5102ee82014-06-27 14:46:01 -04001687static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1688{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001689 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001690 return;
1691
Tony Krowiaka374e892014-09-03 10:13:53 +02001692 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1693
1694 if (vcpu->kvm->arch.crypto.aes_kw)
1695 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1696 if (vcpu->kvm->arch.crypto.dea_kw)
1697 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1698
Tony Krowiak5102ee82014-06-27 14:46:01 -04001699 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1700}
1701
Dominik Dingelb31605c2014-03-25 13:47:11 +01001702void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1703{
1704 free_page(vcpu->arch.sie_block->cbrlo);
1705 vcpu->arch.sie_block->cbrlo = 0;
1706}
1707
1708int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1709{
1710 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1711 if (!vcpu->arch.sie_block->cbrlo)
1712 return -ENOMEM;
1713
1714 vcpu->arch.sie_block->ecb2 |= 0x80;
1715 vcpu->arch.sie_block->ecb2 &= ~0x08;
1716 return 0;
1717}
1718
/*
 * Apply the VM's cpu model (IBC value and, if the stfle facility 7 is
 * available, the facility list origin) to this vcpu's SIE block.
 */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
1727
/*
 * Configure the freshly created vcpu's SIE control block: cpu state flags,
 * execution-control bits gated on guest facilities and sclp capabilities,
 * interception controls, CMMA, the clock-comparator timer and crypto.
 * Returns 0 on success or a negative error from CMMA setup.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* start in z/Architecture mode, supervisor state, stopped */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/*
	 * ecb/ecb2/eca/ecd bit meanings below are SIE execution controls;
	 * magic values follow the SIE control block layout (see kvm_host.h).
	 */
	vcpu->arch.sie_block->ecb = 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	/* transactional execution needs both facility 50 and 73 */
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8))
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	/* facility 129: vector support */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	/* runtime-instrumentation control block lives in the kvm_run area */
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	/* intercept key-management instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (test_kvm_facility(vcpu->kvm, 74))
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer firing when the guest's clock comparator becomes pending */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1779
/*
 * Allocate and initialize a new vcpu: check that the id fits the SCA
 * (switching to ESCA if needed), allocate the vcpu structure and its
 * SIE page, and wire up local interrupt state.  Returns the vcpu or an
 * ERR_PTR; partially constructed state is unwound via the goto chain.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE control block must occupy its own zeroed page */
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	/* intercept transaction diagnostic block, colocated on the SIE page */
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
1825
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1830
/*
 * Prevent the vcpu from (re)entering SIE and kick it out if it is
 * currently running there.  Paired with kvm_s390_vcpu_unblock().
 */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1836
/* Allow the vcpu to enter SIE again after kvm_s390_vcpu_block(). */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1841
/*
 * Mark a request as pending in the SIE block and force the vcpu out of
 * SIE so it notices; cleared again by kvm_s390_vcpu_request_handled().
 */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1847
/* Acknowledge that pending requests were processed; re-enables SIE entry. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
1852
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* STOP_INT makes SIE intercept; then busy-wait for the exit */
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1863
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
1870
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001871static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1872{
1873 int i;
1874 struct kvm *kvm = gmap->private;
1875 struct kvm_vcpu *vcpu;
1876
1877 kvm_for_each_vcpu(i, vcpu, kvm) {
1878 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +02001879 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001880 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001881 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001882 }
1883 }
1884}
1885
/*
 * Not used on s390: kicking is done via exit_sie(), never through the
 * generic IPI path that consults this hook.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1892
/*
 * KVM_GET_ONE_REG: copy a single architecture register to the user
 * buffer at reg->addr.  Returns 0, -EFAULT from put_user, or -EINVAL
 * for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* accessor accounts time of the currently running period */
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1941
/*
 * KVM_SET_ONE_REG: read a single architecture register value from the
 * user buffer at reg->addr and apply it.  Returns 0, -EFAULT from
 * get_user, or -EINVAL for unknown register ids.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		/* go through the setter so a running accounting period restarts */
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token disables async page faults entirely */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001994
/* KVM_S390_INITIAL_RESET ioctl: perform an initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
2000
/* KVM_SET_REGS: general purpose registers live in the kvm_run sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
2006
/* KVM_GET_REGS: general purpose registers live in the kvm_run sync area. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
2012
/*
 * KVM_SET_SREGS: set access and control registers.  ACRS are loaded into
 * the hardware registers right away since the vcpu is already loaded.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
2021
/* KVM_GET_SREGS: read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
2029
/*
 * KVM_SET_FPU: install new guest floating point state.  The state is
 * written into current->thread.fpu so the lazy-FPU machinery loads it on
 * the next guest entry.  Returns -EINVAL for an invalid FPC value.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		/* fprs map onto the low halves of the vector registers */
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}
2043
2044int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2045{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002046 /* make sure we have the latest values */
2047 save_fpu_regs();
2048 if (MACHINE_HAS_VX)
2049 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2050 else
2051 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2052 fpu->fpc = current->thread.fpu.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002053 return 0;
2054}
2055
2056static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2057{
2058 int rc = 0;
2059
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002060 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002061 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002062 else {
2063 vcpu->run->psw_mask = psw.mask;
2064 vcpu->run->psw_addr = psw.addr;
2065 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002066 return rc;
2067}
2068
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
2074
/* guest-debug control flags userspace may set; anything else is rejected */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable or disable guest debugging.  The previous
 * configuration is always cleared first; on failure to import hardware
 * breakpoints everything is rolled back to the disabled state.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* reset to a clean state, then rebuild from dbg->control */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* breakpoint import failed: fully disable debugging again */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
2110
/*
 * KVM_GET_MP_STATE backend.
 * NOTE(review): the state is returned as the function's return value,
 * not written through *mp_state — confirm this matches what the generic
 * ioctl dispatcher expects for s390 before changing anything here.
 */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
2118
2119int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2120 struct kvm_mp_state *mp_state)
2121{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002122 int rc = 0;
2123
2124 /* user space knows about this interface - let it control the state */
2125 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2126
2127 switch (mp_state->mp_state) {
2128 case KVM_MP_STATE_STOPPED:
2129 kvm_s390_vcpu_stop(vcpu);
2130 break;
2131 case KVM_MP_STATE_OPERATING:
2132 kvm_s390_vcpu_start(vcpu);
2133 break;
2134 case KVM_MP_STATE_LOAD:
2135 case KVM_MP_STATE_CHECK_STOP:
2136 /* fall through - CHECK_STOP and LOAD are not supported yet */
2137 default:
2138 rc = -ENXIO;
2139 }
2140
2141 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002142}
2143
David Hildenbrand8ad35752014-03-14 11:00:21 +01002144static bool ibs_enabled(struct kvm_vcpu *vcpu)
2145{
2146 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2147}
2148
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002149static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2150{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002151retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002152 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002153 if (!vcpu->requests)
2154 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002155 /*
2156 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2157 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2158 * This ensures that the ipte instruction for this request has
2159 * already finished. We might race against a second unmapper that
2160 * wants to set the blocking bit. Lets just retry the request loop.
2161 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002162 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002163 int rc;
2164 rc = gmap_ipte_notify(vcpu->arch.gmap,
Michael Muellerfda902c2014-05-13 16:58:30 +02002165 kvm_s390_get_prefix(vcpu),
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002166 PAGE_SIZE * 2);
2167 if (rc)
2168 return rc;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002169 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002170 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002171
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002172 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2173 vcpu->arch.sie_block->ihcpu = 0xffff;
2174 goto retry;
2175 }
2176
David Hildenbrand8ad35752014-03-14 11:00:21 +01002177 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2178 if (!ibs_enabled(vcpu)) {
2179 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002180 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002181 &vcpu->arch.sie_block->cpuflags);
2182 }
2183 goto retry;
2184 }
2185
2186 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2187 if (ibs_enabled(vcpu)) {
2188 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002189 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002190 &vcpu->arch.sie_block->cpuflags);
2191 }
2192 goto retry;
2193 }
2194
David Hildenbrand0759d062014-05-13 16:54:32 +02002195 /* nothing to do, just clear the request */
2196 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2197
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002198 return 0;
2199}
2200
David Hildenbrand25ed1672015-05-12 09:49:14 +02002201void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2202{
2203 struct kvm_vcpu *vcpu;
2204 int i;
2205
2206 mutex_lock(&kvm->lock);
2207 preempt_disable();
2208 kvm->arch.epoch = tod - get_tod_clock();
2209 kvm_s390_vcpu_block_all(kvm);
2210 kvm_for_each_vcpu(i, vcpu, kvm)
2211 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2212 kvm_s390_vcpu_unblock_all(kvm);
2213 preempt_enable();
2214 mutex_unlock(&kvm->lock);
2215}
2216
Thomas Huthfa576c52014-05-06 17:20:16 +02002217/**
2218 * kvm_arch_fault_in_page - fault-in guest page if necessary
2219 * @vcpu: The corresponding virtual cpu
2220 * @gpa: Guest physical address
2221 * @writable: Whether the page should be writable or not
2222 *
2223 * Make sure that a guest page has been faulted-in on the host.
2224 *
2225 * Return: Zero on success, negative error code otherwise.
2226 */
2227long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002228{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002229 return gmap_fault(vcpu->arch.gmap, gpa,
2230 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002231}
2232
Dominik Dingel3c038e62013-10-07 17:11:48 +02002233static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2234 unsigned long token)
2235{
2236 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002237 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002238
2239 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002240 irq.u.ext.ext_params2 = token;
2241 irq.type = KVM_S390_INT_PFAULT_INIT;
2242 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002243 } else {
2244 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002245 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002246 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2247 }
2248}
2249
2250void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2251 struct kvm_async_pf *work)
2252{
2253 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2254 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2255}
2256
2257void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2258 struct kvm_async_pf *work)
2259{
2260 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2261 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2262}
2263
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* nothing to do - s390 always injects the page directly */
}
2269
2270bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2271{
2272 /*
2273 * s390 will always inject the page directly,
2274 * but we still want check_async_completion to cleanup
2275 */
2276 return true;
2277}
2278
2279static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2280{
2281 hva_t hva;
2282 struct kvm_arch_async_pf arch;
2283 int rc;
2284
2285 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2286 return 0;
2287 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2288 vcpu->arch.pfault_compare)
2289 return 0;
2290 if (psw_extint_disabled(vcpu))
2291 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002292 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002293 return 0;
2294 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2295 return 0;
2296 if (!vcpu->arch.gmap->pfault_enabled)
2297 return 0;
2298
Heiko Carstens81480cc2014-01-01 16:36:07 +01002299 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2300 hva += current->thread.gmap_addr & ~PAGE_MASK;
2301 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002302 return 0;
2303
2304 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2305 return rc;
2306}
2307
Thomas Huth3fb4c402013-09-12 10:33:43 +02002308static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002309{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002310 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002311
Dominik Dingel3c038e62013-10-07 17:11:48 +02002312 /*
2313 * On s390 notifications for arriving pages will be delivered directly
2314 * to the guest but the house keeping for completed pfaults is
2315 * handled outside the worker.
2316 */
2317 kvm_check_async_pf_completion(vcpu);
2318
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002319 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2320 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002321
2322 if (need_resched())
2323 schedule();
2324
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002325 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002326 s390_handle_mcck();
2327
Jens Freimann79395032014-04-17 10:10:30 +02002328 if (!kvm_is_ucontrol(vcpu->kvm)) {
2329 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2330 if (rc)
2331 return rc;
2332 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002333
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002334 rc = kvm_s390_handle_requests(vcpu);
2335 if (rc)
2336 return rc;
2337
David Hildenbrand27291e22014-01-23 12:26:52 +01002338 if (guestdbg_enabled(vcpu)) {
2339 kvm_s390_backup_guest_per_regs(vcpu);
2340 kvm_s390_patch_guest_per_regs(vcpu);
2341 }
2342
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002343 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002344 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2345 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2346 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002347
Thomas Huth3fb4c402013-09-12 10:33:43 +02002348 return 0;
2349}
2350
Thomas Huth492d8642015-02-10 16:11:01 +01002351static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2352{
David Hildenbrand56317922016-01-12 17:37:58 +01002353 struct kvm_s390_pgm_info pgm_info = {
2354 .code = PGM_ADDRESSING,
2355 };
2356 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01002357 int rc;
2358
2359 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2360 trace_kvm_s390_sie_fault(vcpu);
2361
2362 /*
2363 * We want to inject an addressing exception, which is defined as a
2364 * suppressing or terminating exception. However, since we came here
2365 * by a DAT access exception, the PSW still points to the faulting
2366 * instruction since DAT exceptions are nullifying. So we've got
2367 * to look up the current opcode to get the length of the instruction
2368 * to be able to forward the PSW.
2369 */
David Hildenbrand65977322015-11-16 16:17:45 +01002370 rc = read_guest_instr(vcpu, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01002371 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01002372 if (rc < 0) {
2373 return rc;
2374 } else if (rc) {
2375 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2376 * Forward by arbitrary ilc, injection will take care of
2377 * nullification if necessary.
2378 */
2379 pgm_info = vcpu->arch.pgm;
2380 ilen = 4;
2381 }
David Hildenbrand56317922016-01-12 17:37:58 +01002382 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2383 kvm_s390_forward_psw(vcpu, ilen);
2384 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01002385}
2386
Thomas Huth3fb4c402013-09-12 10:33:43 +02002387static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2388{
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002389 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2390 vcpu->arch.sie_block->icptcode);
2391 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2392
David Hildenbrand27291e22014-01-23 12:26:52 +01002393 if (guestdbg_enabled(vcpu))
2394 kvm_s390_restore_guest_per_regs(vcpu);
2395
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002396 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2397 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002398
2399 if (vcpu->arch.sie_block->icptcode > 0) {
2400 int rc = kvm_handle_sie_intercept(vcpu);
2401
2402 if (rc != -EOPNOTSUPP)
2403 return rc;
2404 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2405 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2406 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2407 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2408 return -EREMOTE;
2409 } else if (exit_reason != -EFAULT) {
2410 vcpu->stat.exit_null++;
2411 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002412 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2413 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2414 vcpu->run->s390_ucontrol.trans_exc_code =
2415 current->thread.gmap_addr;
2416 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002417 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002418 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002419 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002420 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02002421 if (kvm_arch_setup_async_pf(vcpu))
2422 return 0;
2423 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002424 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02002425 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002426}
2427
2428static int __vcpu_run(struct kvm_vcpu *vcpu)
2429{
2430 int rc, exit_reason;
2431
Thomas Huth800c1062013-09-12 10:33:45 +02002432 /*
2433 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2434 * ning the guest), so that memslots (and other stuff) are protected
2435 */
2436 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2437
Thomas Hutha76ccff2013-09-12 10:33:44 +02002438 do {
2439 rc = vcpu_pre_run(vcpu);
2440 if (rc)
2441 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002442
Thomas Huth800c1062013-09-12 10:33:45 +02002443 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002444 /*
2445 * As PF_VCPU will be used in fault handler, between
2446 * guest_enter and guest_exit should be no uaccess.
2447 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002448 local_irq_disable();
2449 __kvm_guest_enter();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002450 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002451 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002452 exit_reason = sie64a(vcpu->arch.sie_block,
2453 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002454 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002455 __enable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002456 __kvm_guest_exit();
2457 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002458 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002459
Thomas Hutha76ccff2013-09-12 10:33:44 +02002460 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002461 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002462
Thomas Huth800c1062013-09-12 10:33:45 +02002463 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002464 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002465}
2466
David Hildenbrandb028ee32014-07-17 10:47:43 +02002467static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2468{
2469 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2470 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2471 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2472 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2473 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2474 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002475 /* some control register changes require a tlb flush */
2476 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002477 }
2478 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01002479 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002480 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2481 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2482 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2483 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2484 }
2485 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2486 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2487 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2488 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002489 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2490 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002491 }
2492 kvm_run->kvm_dirty_regs = 0;
2493}
2494
2495static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2496{
2497 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2498 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2499 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2500 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01002501 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002502 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2503 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2504 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2505 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2506 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2507 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2508 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2509}
2510
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002511int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2512{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002513 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002514 sigset_t sigsaved;
2515
David Hildenbrand27291e22014-01-23 12:26:52 +01002516 if (guestdbg_exit_pending(vcpu)) {
2517 kvm_s390_prepare_debug_exit(vcpu);
2518 return 0;
2519 }
2520
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002521 if (vcpu->sigset_active)
2522 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2523
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002524 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2525 kvm_s390_vcpu_start(vcpu);
2526 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002527 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002528 vcpu->vcpu_id);
2529 return -EINVAL;
2530 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002531
David Hildenbrandb028ee32014-07-17 10:47:43 +02002532 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002533 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002534
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002535 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002536 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002537
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002538 if (signal_pending(current) && !rc) {
2539 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002540 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002541 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002542
David Hildenbrand27291e22014-01-23 12:26:52 +01002543 if (guestdbg_exit_pending(vcpu) && !rc) {
2544 kvm_s390_prepare_debug_exit(vcpu);
2545 rc = 0;
2546 }
2547
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002548 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002549 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002550 rc = 0;
2551 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002552
David Hildenbranddb0758b2016-02-15 09:42:25 +01002553 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002554 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002555
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002556 if (vcpu->sigset_active)
2557 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2558
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002559 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002560 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002561}
2562
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002563/*
2564 * store status at address
2565 * we use have two special cases:
2566 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2567 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2568 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01002569int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002570{
Carsten Otte092670c2011-07-24 10:48:22 +02002571 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002572 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02002573 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01002574 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002575 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002576
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002577 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01002578 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2579 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002580 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002581 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01002582 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2583 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002584 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002585 gpa = px;
2586 } else
2587 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002588
2589 /* manually convert vector registers if necessary */
2590 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01002591 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002592 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2593 fprs, 128);
2594 } else {
2595 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002596 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002597 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002598 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002599 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002600 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002601 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002602 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02002603 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002604 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002605 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002606 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002607 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01002608 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002609 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01002610 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01002611 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002612 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002613 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002614 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002615 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02002616 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01002617 &vcpu->arch.sie_block->gcr, 128);
2618 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002619}
2620
Thomas Huthe8798922013-11-06 15:46:33 +01002621int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2622{
2623 /*
2624 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2625 * copying in vcpu load/put. Lets update our copies before we save
2626 * it into the save area
2627 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002628 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002629 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01002630 save_access_regs(vcpu->run->s.regs.acrs);
2631
2632 return kvm_s390_store_status_unloaded(vcpu, addr);
2633}
2634
Eric Farmanbc17de72014-04-14 16:01:09 -04002635/*
2636 * store additional status at address
2637 */
2638int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2639 unsigned long gpa)
2640{
2641 /* Only bits 0-53 are used for address formation */
2642 if (!(gpa & ~0x3ff))
2643 return 0;
2644
2645 return write_guest_abs(vcpu, gpa & ~0x3ff,
2646 (void *)&vcpu->run->s.regs.vrs, 512);
2647}
2648
2649int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2650{
2651 if (!test_kvm_facility(vcpu->kvm, 129))
2652 return 0;
2653
2654 /*
2655 * The guest VXRS are in the host VXRs due to the lazy
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002656 * copying in vcpu load/put. We can simply call save_fpu_regs()
2657 * to save the current register state because we are in the
2658 * middle of a load/put cycle.
2659 *
2660 * Let's update our copies before we save it into the save area.
Eric Farmanbc17de72014-04-14 16:01:09 -04002661 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02002662 save_fpu_regs();
Eric Farmanbc17de72014-04-14 16:01:09 -04002663
2664 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2665}
2666
David Hildenbrand8ad35752014-03-14 11:00:21 +01002667static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2668{
2669 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002670 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002671}
2672
2673static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2674{
2675 unsigned int i;
2676 struct kvm_vcpu *vcpu;
2677
2678 kvm_for_each_vcpu(i, vcpu, kvm) {
2679 __disable_ibs_on_vcpu(vcpu);
2680 }
2681}
2682
2683static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2684{
2685 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002686 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002687}
2688
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002689void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2690{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002691 int i, online_vcpus, started_vcpus = 0;
2692
2693 if (!is_vcpu_stopped(vcpu))
2694 return;
2695
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002696 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002697 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002698 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002699 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2700
2701 for (i = 0; i < online_vcpus; i++) {
2702 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2703 started_vcpus++;
2704 }
2705
2706 if (started_vcpus == 0) {
2707 /* we're the only active VCPU -> speed it up */
2708 __enable_ibs_on_vcpu(vcpu);
2709 } else if (started_vcpus == 1) {
2710 /*
2711 * As we are starting a second VCPU, we have to disable
2712 * the IBS facility on all VCPUs to remove potentially
2713 * oustanding ENABLE requests.
2714 */
2715 __disable_ibs_on_all_vcpus(vcpu->kvm);
2716 }
2717
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002718 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002719 /*
2720 * Another VCPU might have used IBS while we were offline.
2721 * Let's play safe and flush the VCPU at startup.
2722 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002723 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002724 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002725 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002726}
2727
2728void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2729{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002730 int i, online_vcpus, started_vcpus = 0;
2731 struct kvm_vcpu *started_vcpu = NULL;
2732
2733 if (is_vcpu_stopped(vcpu))
2734 return;
2735
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002736 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002737 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002738 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002739 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2740
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002741 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002742 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002743
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002744 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002745 __disable_ibs_on_vcpu(vcpu);
2746
2747 for (i = 0; i < online_vcpus; i++) {
2748 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2749 started_vcpus++;
2750 started_vcpu = vcpu->kvm->vcpus[i];
2751 }
2752 }
2753
2754 if (started_vcpus == 1) {
2755 /*
2756 * As we only have one VCPU left, we want to enable the
2757 * IBS facility for that VCPU to speed it up.
2758 */
2759 __enable_ibs_on_vcpu(started_vcpu);
2760 }
2761
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002762 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002763 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002764}
2765
Cornelia Huckd6712df2012-12-20 15:32:11 +01002766static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2767 struct kvm_enable_cap *cap)
2768{
2769 int r;
2770
2771 if (cap->flags)
2772 return -EINVAL;
2773
2774 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002775 case KVM_CAP_S390_CSS_SUPPORT:
2776 if (!vcpu->kvm->arch.css_support) {
2777 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002778 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002779 trace_kvm_s390_enable_css(vcpu->kvm);
2780 }
2781 r = 0;
2782 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002783 default:
2784 r = -EINVAL;
2785 break;
2786 }
2787 return r;
2788}
2789
Thomas Huth41408c282015-02-06 15:01:21 +01002790static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2791 struct kvm_s390_mem_op *mop)
2792{
2793 void __user *uaddr = (void __user *)mop->buf;
2794 void *tmpbuf = NULL;
2795 int r, srcu_idx;
2796 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2797 | KVM_S390_MEMOP_F_CHECK_ONLY;
2798
2799 if (mop->flags & ~supported_flags)
2800 return -EINVAL;
2801
2802 if (mop->size > MEM_OP_MAX_SIZE)
2803 return -E2BIG;
2804
2805 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2806 tmpbuf = vmalloc(mop->size);
2807 if (!tmpbuf)
2808 return -ENOMEM;
2809 }
2810
2811 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2812
2813 switch (mop->op) {
2814 case KVM_S390_MEMOP_LOGICAL_READ:
2815 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002816 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2817 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01002818 break;
2819 }
2820 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2821 if (r == 0) {
2822 if (copy_to_user(uaddr, tmpbuf, mop->size))
2823 r = -EFAULT;
2824 }
2825 break;
2826 case KVM_S390_MEMOP_LOGICAL_WRITE:
2827 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01002828 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2829 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01002830 break;
2831 }
2832 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2833 r = -EFAULT;
2834 break;
2835 }
2836 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2837 break;
2838 default:
2839 r = -EINVAL;
2840 }
2841
2842 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2843
2844 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2845 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2846
2847 vfree(tmpbuf);
2848 return r;
2849}
2850
/*
 * Dispatch arch-specific ioctls issued on a vcpu file descriptor.
 *
 * Returns the per-command result (0 or positive on success, negative
 * errno on failure) and -ENOTTY for ioctl numbers not handled here.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		/* Inject one interrupt described by a kvm_s390_irq. */
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/*
		 * Legacy injection interface: translate the old
		 * kvm_s390_interrupt layout into a kvm_s390_irq first.
		 */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* srcu keeps the memslots stable while status is stored. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* Map user memory into the guest address space (ucontrol VMs only). */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* Undo a KVM_S390_UCAS_MAP for the given guest range. */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Resolve a guest address fault; arg is the guest address. */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		/*
		 * The buffer length must be a non-zero multiple of the irq
		 * record size and fit into the per-vcpu limit.
		 */
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
3005
/*
 * Handle a page fault on the vcpu fd's mmap region.  Only ucontrol VMs
 * expose a page here: the SIE control block at KVM_S390_SIE_PAGE_OFFSET.
 * All other accesses get SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
3018
/* s390 keeps no arch-specific per-memslot data; nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
3024
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003025/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003026int kvm_arch_prepare_memory_region(struct kvm *kvm,
3027 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003028 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003029 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003030{
Nick Wangdd2887e2013-03-25 17:22:57 +01003031 /* A few sanity checks. We can have memory slots which have to be
3032 located/ended at a segment boundary (1MB). The memory in userland is
3033 ok to be fragmented into various different vmas. It is okay to mmap()
3034 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003035
Carsten Otte598841c2011-07-24 10:48:21 +02003036 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003037 return -EINVAL;
3038
Carsten Otte598841c2011-07-24 10:48:21 +02003039 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003040 return -EINVAL;
3041
Dominik Dingela3a92c32014-12-01 17:24:42 +01003042 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3043 return -EINVAL;
3044
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003045 return 0;
3046}
3047
3048void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003049 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003050 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003051 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003052 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003053{
Carsten Ottef7850c92011-07-24 10:48:23 +02003054 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003055
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003056 /* If the basics of the memslot do not change, we do not want
3057 * to update the gmap. Every update causes several unnecessary
3058 * segment translation exceptions. This is usually handled just
3059 * fine by the normal fault handler + gmap, but it will also
3060 * cause faults on the prefix page of running guest CPUs.
3061 */
3062 if (old->userspace_addr == mem->userspace_addr &&
3063 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3064 old->npages * PAGE_SIZE == mem->memory_size)
3065 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003066
3067 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3068 mem->guest_phys_addr, mem->memory_size);
3069 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003070 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003071 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003072}
3073
Alexander Yarygin60a37702016-04-01 15:38:57 +03003074static inline unsigned long nonhyp_mask(int i)
3075{
3076 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3077
3078 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3079}
3080
/* Called when a vcpu leaves the blocked state; clear the wakeup hint. */
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
3085
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003086static int __init kvm_s390_init(void)
3087{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003088 int i;
3089
David Hildenbrand07197fd2015-01-30 16:01:38 +01003090 if (!sclp.has_sief2) {
3091 pr_info("SIE not available\n");
3092 return -ENODEV;
3093 }
3094
Alexander Yarygin60a37702016-04-01 15:38:57 +03003095 for (i = 0; i < 16; i++)
3096 kvm_s390_fac_list_mask[i] |=
3097 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3098
Michael Mueller9d8d5782015-02-02 15:42:51 +01003099 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003100}
3101
/* Module exit: tear down the state set up by kvm_init(). */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
3106
/* Register the module entry and exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");