/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
	0x4000000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

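/*
 * Transfer the dirty bits from the gmap guest mapping into the KVM dirty
 * bitmap: walk every page of the memslot and mark the corresponding gfn
 * dirty whenever gmap_test_and_clear_dirty() reports a change.
 */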
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages; last_gfn is one past the end of the slot */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		kvm->arch.use_vectors = MACHINE_HAS_VX;
		r = MACHINE_HAS_VX ? 0 : -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

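/*
 * Set or clear the AES/DEA wrapping key masks in the shared crypto control
 * block. Every vCPU is then resynchronized and kicked out of SIE so that
 * the new settings take effect on reentry.
 */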
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

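/*
 * The guest TOD clock is kept as an offset (epoch) from the host TOD:
 * epoch = guest TOD - host TOD. Store the new epoch in the VM and
 * propagate it to every vCPU's SIE control block, kicking each vCPU out
 * of SIE so the new value is picked up.
 */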
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

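/*
 * Let userspace set the CPU model (cpuid, IBC and facility list) that is
 * presented to the guest. This is only possible while no vCPU exists yet,
 * because vCPUs take over the model when they are set up.
 */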
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

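/*
 * Read the guest storage keys for a range of guest frames into a kernel
 * buffer and copy them out to userspace. Returns KVM_S390_GET_SKEYS_NONE
 * if the guest is not using storage keys at all.
 */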
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

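/*
 * Query the Adjunct Processor (AP) configuration with the PQAP(QCI)
 * instruction (hand-coded as .long 0xb2af0000). The 128-byte config block
 * is filled by the machine; the EX_TABLE entry catches a program check
 * from the instruction and resumes at label 1, leaving cc at its
 * initialized value of 0.
 */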
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

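/*
 * Select the crypto control block format: format 2 if the APXA facility
 * (AP extended addressing) is installed, format 1 otherwise.
 */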
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
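	/*
	 * Stagger the SCA within its page in 16-byte steps so that the SCAs
	 * of different VMs do not all start at the same page offset
	 * (presumably to spread cache usage).
	 */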
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.use_vectors = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

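/*
 * Switch the register context from host to guest: save the host floating
 * point/vector and access registers, then load the guest's. With vectors
 * in use the full vector register set is switched, otherwise only the
 * classic floating point registers.
 */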
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (vcpu->kvm->arch.use_vectors) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (vcpu->kvm->arch.use_vectors) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

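/*
 * Mirror the VM-wide crypto settings into this vCPU's SIE control block:
 * set or clear the AES/DEA key-wrapping bits in ecb3 and point the vCPU
 * at the shared crypto control block.
 */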
Tony Krowiak5102ee82014-06-27 14:46:01 -04001259static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1260{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001261 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001262 return;
1263
Tony Krowiaka374e892014-09-03 10:13:53 +02001264 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1265
1266 if (vcpu->kvm->arch.crypto.aes_kw)
1267 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1268 if (vcpu->kvm->arch.crypto.dea_kw)
1269 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1270
Tony Krowiak5102ee82014-06-27 14:46:01 -04001271 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1272}
1273
Dominik Dingelb31605c2014-03-25 13:47:11 +01001274void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1275{
1276 free_page(vcpu->arch.sie_block->cbrlo);
1277 vcpu->arch.sie_block->cbrlo = 0;
1278}
1279
1280int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1281{
1282 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1283 if (!vcpu->arch.sie_block->cbrlo)
1284 return -ENOMEM;
1285
1286 vcpu->arch.sie_block->ecb2 |= 0x80;
1287 vcpu->arch.sie_block->ecb2 &= ~0x08;
1288 return 0;
1289}
1290
Michael Mueller91520f12015-02-27 14:32:11 +01001291static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1292{
1293 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1294
1295 vcpu->arch.cpu_id = model->cpu_id;
1296 vcpu->arch.sie_block->ibc = model->ibc;
1297 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1298}
1299
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001300int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1301{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001302 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001303
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001304 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1305 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +02001306 CPUSTAT_STOPPED |
1307 CPUSTAT_GED);
Michael Mueller91520f12015-02-27 14:32:11 +01001308 kvm_s390_vcpu_setup_model(vcpu);
1309
Christian Borntraegerfc345312010-06-17 23:16:20 +02001310 vcpu->arch.sie_block->ecb = 6;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001311 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001312 vcpu->arch.sie_block->ecb |= 0x10;
1313
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +02001314 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001315 vcpu->arch.sie_block->eca = 0xC1002000U;
Heiko Carstens217a4402013-12-30 12:54:14 +01001316 if (sclp_has_siif())
1317 vcpu->arch.sie_block->eca |= 1;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001318 if (sclp_has_sigpif())
1319 vcpu->arch.sie_block->eca |= 0x10000000U;
Eric Farman13211ea2014-04-30 13:39:46 -04001320 if (vcpu->kvm->arch.use_vectors) {
1321 vcpu->arch.sie_block->eca |= 0x00020000;
1322 vcpu->arch.sie_block->ecd |= 0x20000000;
1323 }
Thomas Huth492d8642015-02-10 16:11:01 +01001324 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001325
Dominik Dingelb31605c2014-03-25 13:47:11 +01001326 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1327 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1328 if (rc)
1329 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001330 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001331 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001332 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001333
Tony Krowiak5102ee82014-06-27 14:46:01 -04001334 kvm_s390_vcpu_crypto_setup(vcpu);
1335
Dominik Dingelb31605c2014-03-25 13:47:11 +01001336 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001337}
1338
1339struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1340 unsigned int id)
1341{
Carsten Otte4d475552011-10-18 12:27:12 +02001342 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001343 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001344 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001345
Carsten Otte4d475552011-10-18 12:27:12 +02001346 if (id >= KVM_MAX_VCPUS)
1347 goto out;
1348
1349 rc = -ENOMEM;
1350
Michael Muellerb110fea2013-06-12 13:54:54 +02001351 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001352 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001353 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001354
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001355 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1356 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001357 goto out_free_cpu;
1358
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001359 vcpu->arch.sie_block = &sie_page->sie_block;
1360 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
Eric Farman68c55752014-06-09 10:57:26 -04001361 vcpu->arch.host_vregs = &sie_page->vregs;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001362
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001363 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001364 if (!kvm_is_ucontrol(kvm)) {
1365 if (!kvm->arch.sca) {
1366 WARN_ON_ONCE(1);
1367 goto out_free_cpu;
1368 }
1369 if (!kvm->arch.sca->cpu[id].sda)
1370 kvm->arch.sca->cpu[id].sda =
1371 (__u64) vcpu->arch.sie_block;
1372 vcpu->arch.sie_block->scaoh =
1373 (__u32)(((__u64)kvm->arch.sca) >> 32);
1374 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1375 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1376 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001377
Carsten Otteba5c1e92008-03-25 18:47:26 +01001378 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001379 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001380 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001381 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001382
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001383 rc = kvm_vcpu_init(vcpu, kvm, id);
1384 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001385 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001386 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1387 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001388 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001389
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001390 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001391out_free_sie_block:
1392 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001393out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001394 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001395out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001396 return ERR_PTR(rc);
1397}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

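/*
 * Setting PROG_BLOCK_SIE in prog20 keeps a vcpu from (re-)entering SIE.
 * Combined with exit_sie() below, this yields a "kick the cpu out of
 * guest context and keep it out" primitive.
 */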
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

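/*
 * gmap notifier callback: invoked when a host mapping backing guest
 * memory is invalidated. If the invalidated range covers a vcpu's
 * prefix pages, that vcpu is kicked out of SIE and an MMU reload is
 * requested so the prefix mapping is re-established before re-entry.
 */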
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

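/*
 * Handlers for the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. A
 * hypothetical user-space caller might look like this (sketch only,
 * identifiers apart from the UAPI names are made up):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */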
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

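/*
 * Configure guest debugging (KVM_SET_GUEST_DEBUG): enabling it enforces
 * guest PER via CPUSTAT_P, and hardware breakpoints are imported from
 * the user-supplied control block. On failure, all debug state is
 * rolled back again.
 */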
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

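/*
 * Process all outstanding requests for this vcpu before (re-)entering
 * SIE: re-arm the ipte notifier for the prefix pages, invalidate ihcpu
 * to force a TLB flush, and enable or disable IBS as requested.
 */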
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

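/*
 * Notify the guest about async page fault handling via the pfault
 * mechanism: a PFAULT_INIT external interrupt is injected into the vcpu
 * when async handling of a fault starts, and a PFAULT_DONE interrupt is
 * injected into the vm once the page has become available.
 */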
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

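/*
 * Check whether the current host fault may be handled asynchronously
 * through the pfault interface: this requires a valid pfault token,
 * matching PSW mask bits, external interrupts to be enabled, the
 * pfault-enablement bit in guest CR0 (0x200) and pfault handling to be
 * enabled on the gmap; otherwise the caller falls back to a synchronous
 * fault-in.
 */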
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

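/*
 * Everything the host has to take care of before entering SIE:
 * async-pf housekeeping, pending machine checks, interrupt delivery,
 * vcpu requests and guest-debug PER setup.
 */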
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

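/*
 * Post-process a SIE exit. A negative exit_reason means we did not make
 * it back out of the guest regularly: either user space has to resolve
 * a ucontrol fault, or the host page backing guest memory was gone
 * (gmap_pfault) and is faulted in asynchronously or synchronously;
 * everything else is treated as a fault on the SIE instruction itself.
 */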
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, no uaccess
		 * is allowed between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

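/*
 * sync_regs()/store_regs() shuttle the register state that is shared
 * with user space through the kvm_run area into and out of the SIE
 * control block around each KVM_RUN invocation.
 */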
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

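/*
 * The IBS facility speeds up a guest as long as only a single vcpu is
 * running, so starting and stopping vcpus has to enable or disable it
 * depending on how many vcpus remain in the started state.
 */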
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

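/*
 * Back end for the KVM_S390_MEM_OP vcpu ioctl: read or write guest
 * logical memory from user space, or merely check accessibility with
 * KVM_S390_MEMOP_F_CHECK_ONLY. A hypothetical user-space sketch (all
 * values illustrative, only the UAPI names are real):
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,		// guest logical address
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,			// access register number
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */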
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

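/*
 * Dispatcher for the s390-specific vcpu ioctls; generic vcpu ioctls are
 * handled by the common kvm code before it hands off to this function.
 */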
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end at a
	 * segment boundary (1MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * in this slot at any time after doing this call.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");