blob: dbc9ca34d9da1bec12a9d75f0f6cc1e5a3ec57a3 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010029#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010030#include <asm/lowcore.h>
31#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010032#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010033#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020034#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020035#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010036#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010037#include "gaccess.h"
38
Cornelia Huck5786fff2012-07-23 17:20:29 +020039#define CREATE_TRACE_POINTS
40#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020041#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020042
#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

/* Expands to the (offset, type) initializer pair used in debugfs_entries[] */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
46
/*
 * Per-vcpu statistics table consumed by the common KVM debugfs code.
 * Each entry maps a debugfs file name to a counter in struct kvm_vcpu.stat.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }	/* sentinel terminating the table */
};
106
/* upper facilities limit for kvm */
/*
 * Each u64 entry masks 64 facility bits; bits cleared here are never
 * reported to the guest even if the host offers them.
 */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x205c800000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100112
/* Number of u64 words in kvm_s390_fac_list_mask. */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	/* compile-time check: the mask must fit the architected mask size */
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
118
/* Notifier registered for ipte invalidation events (see hardware_setup). */
static struct gmap_notifier gmap_notifier;
120
/* Section: not file related */
/* Per-cpu enable hook; nothing to do on s390. */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
127
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/* Register for notifications about invalidated guest page table entries. */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
136
/* Undo kvm_arch_hardware_setup(): drop the ipte notifier registration. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
141
/* Module-init hook for the arch code. */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
147
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100148/* Section: device related */
149long kvm_arch_dev_ioctl(struct file *filp,
150 unsigned int ioctl, unsigned long arg)
151{
152 if (ioctl == KVM_S390_ENABLE_SIE)
153 return s390_enable_sie();
154 return -EINVAL;
155}
156
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this VM
 * supports.  Returns 0 for unknown extensions, 1 (or a capability
 * specific value, e.g. a limit) for supported ones.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* unconditionally available capabilities */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;	/* report the transfer size limit */
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write support depends on the ESOP machine feature */
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}
206
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400207static void kvm_s390_sync_dirty_log(struct kvm *kvm,
208 struct kvm_memory_slot *memslot)
209{
210 gfn_t cur_gfn, last_gfn;
211 unsigned long address;
212 struct gmap *gmap = kvm->arch.gmap;
213
214 down_read(&gmap->mm->mmap_sem);
215 /* Loop over all guest pages */
216 last_gfn = memslot->base_gfn + memslot->npages;
217 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
218 address = gfn_to_hva_memslot(memslot, cur_gfn);
219
220 if (gmap_test_and_clear_dirty(address, gmap))
221 mark_page_dirty(kvm, cur_gfn);
222 }
223 up_read(&gmap->mm->mmap_sem);
224}
225
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	/* serialize against memslot changes */
	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty bits from the gmap into the bitmap, then copy out */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
264
/*
 * KVM_ENABLE_CAP handler (VM scope): turn on optional VM-wide features.
 * Returns 0 on success, -EINVAL for unknown caps, non-zero flags, or
 * unmet hardware prerequisites.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* only offer vector registers if the host has them */
		if (MACHINE_HAS_VX) {
			/* announce facility 129 in both mask and list */
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
299
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100300static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
301{
302 int ret;
303
304 switch (attr->attr) {
305 case KVM_S390_VM_MEM_LIMIT_SIZE:
306 ret = 0;
307 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
308 ret = -EFAULT;
309 break;
310 default:
311 ret = -ENXIO;
312 break;
313 }
314 return ret;
315}
316
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute: enable CMMA, reset CMMA
 * state, or change the guest memory limit.  CMMA enablement and
 * limit changes are only allowed while no vcpus exist yet.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* only before the first vcpu is created */
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol guests manage their own gmap */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit can only be lowered, never raised */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the new address space */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
375
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

/*
 * Write a KVM_S390_VM_CRYPTO attribute: enable/disable AES/DEA key
 * wrapping.  Enabling generates fresh random wrapping key masks;
 * disabling clears them.  Requires facility 76 (MSA extension 3 --
 * NOTE(review): inferred from the facility number, confirm).
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* propagate the new crypto setup to every existing vcpu */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
422
Jason J. Herne72f25022014-11-25 09:46:02 -0500423static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
424{
425 u8 gtod_high;
426
427 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
428 sizeof(gtod_high)))
429 return -EFAULT;
430
431 if (gtod_high != 0)
432 return -EINVAL;
433
434 return 0;
435}
436
/*
 * Set the guest TOD clock (low word): record the guest/host delta as
 * the VM epoch and push it into every vcpu's SIE control block.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	/* epoch = requested guest TOD relative to the current host TOD */
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		/* kick the vcpu so it picks up the new epoch */
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
460
461static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
462{
463 int ret;
464
465 if (attr->flags)
466 return -EINVAL;
467
468 switch (attr->attr) {
469 case KVM_S390_VM_TOD_HIGH:
470 ret = kvm_s390_set_tod_high(kvm, attr);
471 break;
472 case KVM_S390_VM_TOD_LOW:
473 ret = kvm_s390_set_tod_low(kvm, attr);
474 break;
475 default:
476 ret = -ENXIO;
477 break;
478 }
479 return ret;
480}
481
482static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
483{
484 u8 gtod_high = 0;
485
486 if (copy_to_user((void __user *)attr->addr, &gtod_high,
487 sizeof(gtod_high)))
488 return -EFAULT;
489
490 return 0;
491}
492
493static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
494{
495 u64 host_tod, gtod;
496 int r;
497
498 r = store_tod_clock(&host_tod);
499 if (r)
500 return r;
501
502 gtod = host_tod + kvm->arch.epoch;
503 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
504 return -EFAULT;
505
506 return 0;
507}
508
509static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
510{
511 int ret;
512
513 if (attr->flags)
514 return -EINVAL;
515
516 switch (attr->attr) {
517 case KVM_S390_VM_TOD_HIGH:
518 ret = kvm_s390_get_tod_high(kvm, attr);
519 break;
520 case KVM_S390_VM_TOD_LOW:
521 ret = kvm_s390_get_tod_low(kvm, attr);
522 break;
523 default:
524 ret = -ENXIO;
525 break;
526 }
527 return ret;
528}
529
/*
 * Set the guest's CPU-model processor data (cpuid, ibc, facility
 * list) from user space.  Only allowed before any vcpu exists.
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
559
560static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
561{
562 int ret = -ENXIO;
563
564 switch (attr->attr) {
565 case KVM_S390_VM_CPU_PROCESSOR:
566 ret = kvm_s390_set_processor(kvm, attr);
567 break;
568 }
569 return ret;
570}
571
/* Copy the VM's configured processor data (cpuid, ibc, facilities) out. */
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
591
/*
 * Copy the host machine's CPU-model data out: real cpuid/ibc, the
 * KVM facility mask, and the host's raw stfle facility list.
 */
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	/* host facility bits as stored by stfle in the lowcore */
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
614
615static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
616{
617 int ret = -ENXIO;
618
619 switch (attr->attr) {
620 case KVM_S390_VM_CPU_PROCESSOR:
621 ret = kvm_s390_get_processor(kvm, attr);
622 break;
623 case KVM_S390_VM_CPU_MACHINE:
624 ret = kvm_s390_get_machine(kvm, attr);
625 break;
626 }
627 return ret;
628}
629
Dominik Dingelf2061652014-04-09 13:13:00 +0200630static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
631{
632 int ret;
633
634 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200635 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100636 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200637 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500638 case KVM_S390_VM_TOD:
639 ret = kvm_s390_set_tod(kvm, attr);
640 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100641 case KVM_S390_VM_CPU_MODEL:
642 ret = kvm_s390_set_cpu_model(kvm, attr);
643 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200644 case KVM_S390_VM_CRYPTO:
645 ret = kvm_s390_vm_set_crypto(kvm, attr);
646 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200647 default:
648 ret = -ENXIO;
649 break;
650 }
651
652 return ret;
653}
654
655static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
656{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100657 int ret;
658
659 switch (attr->group) {
660 case KVM_S390_VM_MEM_CTRL:
661 ret = kvm_s390_get_mem_control(kvm, attr);
662 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500663 case KVM_S390_VM_TOD:
664 ret = kvm_s390_get_tod(kvm, attr);
665 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100666 case KVM_S390_VM_CPU_MODEL:
667 ret = kvm_s390_get_cpu_model(kvm, attr);
668 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100669 default:
670 ret = -ENXIO;
671 break;
672 }
673
674 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200675}
676
/*
 * KVM_HAS_DEVICE_ATTR (VM scope): report whether a given group/attr
 * pair is implemented, without performing any action.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
736
/*
 * KVM_S390_GET_SKEYS: read args->count guest storage keys starting at
 * args->start_gfn and copy them to user space.  Returns
 * KVM_S390_GET_SKEYS_NONE when the guest does not use storage keys.
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for larger buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	/* kvfree handles both kmalloc'ed and vmalloc'ed buffers */
	kvfree(keys);
	return r;
}
785
/*
 * KVM_S390_SET_SKEYS: copy args->count storage keys from user space
 * and install them for the guest pages starting at args->start_gfn.
 * Enables storage-key handling for the guest as a side effect.
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* try kmalloc first, fall back to vmalloc for larger buffers */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
838
/*
 * VM-scope ioctl dispatcher: interrupt injection, capability
 * enablement, irqchip creation, device attributes and storage keys.
 * Returns -ENOTTY for unknown ioctls so generic code can take over.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid after KVM_CAP_S390_IRQCHIP was enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
924
Tony Krowiak45c9b472015-01-13 11:33:26 -0500925static int kvm_s390_query_ap_config(u8 *config)
926{
927 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +0100928 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -0500929
Christian Borntraeger86044c82015-02-26 13:53:47 +0100930 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -0500931 asm volatile(
932 "lgr 0,%1\n"
933 "lgr 2,%2\n"
934 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +0100935 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -0500936 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +0100937 "1:\n"
938 EX_TABLE(0b, 1b)
939 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -0500940 : "r" (fcn_code), "r" (config)
941 : "cc", "0", "2", "memory"
942 );
943
944 return cc;
945}
946
947static int kvm_s390_apxa_installed(void)
948{
949 u8 config[128];
950 int cc;
951
952 if (test_facility(2) && test_facility(12)) {
953 cc = kvm_s390_query_ap_config(config);
954
955 if (cc)
956 pr_err("PQAP(QCI) failed with cc=%d", cc);
957 else
958 return config[0] & 0x40;
959 }
960
961 return 0;
962}
963
964static void kvm_s390_set_crycb_format(struct kvm *kvm)
965{
966 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
967
968 if (kvm_s390_apxa_installed())
969 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
970 else
971 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
972}
973
Michael Mueller9d8d5782015-02-02 15:42:51 +0100974static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
975{
976 get_cpu_id(cpu_id);
977 cpu_id->version = 0xff;
978}
979
Tony Krowiak5102ee82014-06-27 14:46:01 -0400980static int kvm_s390_crypto_init(struct kvm *kvm)
981{
Michael Mueller9d8d5782015-02-02 15:42:51 +0100982 if (!test_kvm_facility(kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -0400983 return 0;
984
985 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
986 GFP_KERNEL | GFP_DMA);
987 if (!kvm->arch.crypto.crycb)
988 return -ENOMEM;
989
Tony Krowiak45c9b472015-01-13 11:33:26 -0500990 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -0400991
Tony Krowiaked6f76b2015-02-24 14:06:57 -0500992 /* Enable AES/DEA protected key functions by default */
993 kvm->arch.crypto.aes_kw = 1;
994 kvm->arch.crypto.dea_kw = 1;
995 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
996 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
997 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
998 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiaka374e892014-09-03 10:13:53 +0200999
Tony Krowiak5102ee82014-06-27 14:46:01 -04001000 return 0;
1001}
1002
Carsten Ottee08b9632012-01-04 10:25:20 +01001003int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001004{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001005 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001006 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001007 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001008
Carsten Ottee08b9632012-01-04 10:25:20 +01001009 rc = -EINVAL;
1010#ifdef CONFIG_KVM_S390_UCONTROL
1011 if (type & ~KVM_VM_S390_UCONTROL)
1012 goto out_err;
1013 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1014 goto out_err;
1015#else
1016 if (type)
1017 goto out_err;
1018#endif
1019
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001020 rc = s390_enable_sie();
1021 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001022 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001023
Carsten Otteb2904112011-10-18 12:27:13 +02001024 rc = -ENOMEM;
1025
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001026 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
1027 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001028 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001029 spin_lock(&kvm_lock);
1030 sca_offset = (sca_offset + 16) & 0x7f0;
1031 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
1032 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001033
1034 sprintf(debug_name, "kvm-%u", current->pid);
1035
1036 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
1037 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001038 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001039
Michael Mueller9d8d5782015-02-02 15:42:51 +01001040 /*
1041 * The architectural maximum amount of facilities is 16 kbit. To store
1042 * this amount, 2 kbyte of memory is required. Thus we need a full
Michael Mueller981467c2015-02-24 13:51:04 +01001043 * page to hold the guest facility list (arch.model.fac->list) and the
1044 * facility mask (arch.model.fac->mask). Its address size has to be
Michael Mueller9d8d5782015-02-02 15:42:51 +01001045 * 31 bits and word aligned.
1046 */
1047 kvm->arch.model.fac =
Michael Mueller981467c2015-02-24 13:51:04 +01001048 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001049 if (!kvm->arch.model.fac)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001050 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001051
Michael Muellerfb5bf932015-02-27 14:25:10 +01001052 /* Populate the facility mask initially. */
Michael Mueller981467c2015-02-24 13:51:04 +01001053 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +01001054 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001055 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1056 if (i < kvm_s390_fac_list_mask_size())
Michael Mueller981467c2015-02-24 13:51:04 +01001057 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001058 else
Michael Mueller981467c2015-02-24 13:51:04 +01001059 kvm->arch.model.fac->mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001060 }
1061
Michael Mueller981467c2015-02-24 13:51:04 +01001062 /* Populate the facility list initially. */
1063 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1064 S390_ARCH_FAC_LIST_SIZE_BYTE);
1065
Michael Mueller9d8d5782015-02-02 15:42:51 +01001066 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001067 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001068
Tony Krowiak5102ee82014-06-27 14:46:01 -04001069 if (kvm_s390_crypto_init(kvm) < 0)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001070 goto out_err;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001071
Carsten Otteba5c1e92008-03-25 18:47:26 +01001072 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001073 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1074 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001075 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001076 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001077
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001078 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1079 VM_EVENT(kvm, 3, "%s", "vm created");
1080
Carsten Ottee08b9632012-01-04 10:25:20 +01001081 if (type & KVM_VM_S390_UCONTROL) {
1082 kvm->arch.gmap = NULL;
1083 } else {
Christian Borntraeger03499852014-08-25 12:38:57 +02001084 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001085 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001086 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001087 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001088 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001089 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001090
1091 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001092 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001093 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001094
David Hildenbrand8ad35752014-03-14 11:00:21 +01001095 spin_lock_init(&kvm->arch.start_stop_lock);
1096
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001097 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001098out_err:
Dominik Dingel40f5b732015-03-12 13:55:53 +01001099 kfree(kvm->arch.crypto.crycb);
1100 free_page((unsigned long)kvm->arch.model.fac);
1101 debug_unregister(kvm->arch.dbf);
1102 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001103 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001104}
1105
Christian Borntraegerd329c032008-11-26 14:50:27 +01001106void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1107{
1108 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001109 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001110 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001111 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +01001112 if (!kvm_is_ucontrol(vcpu->kvm)) {
1113 clear_bit(63 - vcpu->vcpu_id,
1114 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1115 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1116 (__u64) vcpu->arch.sie_block)
1117 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
1118 }
Carsten Otteabf4a712009-05-12 17:21:51 +02001119 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +01001120
1121 if (kvm_is_ucontrol(vcpu->kvm))
1122 gmap_free(vcpu->arch.gmap);
1123
Dominik Dingelb31605c2014-03-25 13:47:11 +01001124 if (kvm_s390_cmma_enabled(vcpu->kvm))
1125 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001126 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001127
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001128 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001129 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001130}
1131
/*
 * Destroy all vcpus of a VM and clear the vcpu array under kvm->lock,
 * resetting online_vcpus to zero.  Called from kvm_arch_destroy_vm.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
1147
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001148void kvm_arch_destroy_vm(struct kvm *kvm)
1149{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001150 kvm_free_vcpus(kvm);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001151 free_page((unsigned long)kvm->arch.model.fac);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001152 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +01001153 debug_unregister(kvm->arch.dbf);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001154 kfree(kvm->arch.crypto.crycb);
Carsten Otte27e03932012-01-04 10:25:21 +01001155 if (!kvm_is_ucontrol(kvm))
1156 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001157 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001158 kvm_s390_clear_float_irqs(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001159}
1160
1161/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001162static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1163{
1164 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1165 if (!vcpu->arch.gmap)
1166 return -ENOMEM;
1167 vcpu->arch.gmap->private = vcpu->kvm;
1168
1169 return 0;
1170}
1171
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001172int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1173{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001174 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1175 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001176 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1177 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001178 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001179 KVM_SYNC_CRS |
1180 KVM_SYNC_ARCH0 |
1181 KVM_SYNC_PFAULT;
Eric Farman68c55752014-06-09 10:57:26 -04001182 if (test_kvm_facility(vcpu->kvm, 129))
1183 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001184
1185 if (kvm_is_ucontrol(vcpu->kvm))
1186 return __kvm_ucontrol_vcpu_init(vcpu);
1187
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001188 return 0;
1189}
1190
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001191void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1192{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02001193 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
Michael Mueller18280d82015-03-16 16:05:41 +01001194 if (test_kvm_facility(vcpu->kvm, 129))
Eric Farman68c55752014-06-09 10:57:26 -04001195 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
1196 else
1197 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001198 save_access_regs(vcpu->arch.host_acrs);
Michael Mueller18280d82015-03-16 16:05:41 +01001199 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman68c55752014-06-09 10:57:26 -04001200 restore_fp_ctl(&vcpu->run->s.regs.fpc);
1201 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
1202 } else {
1203 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1204 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1205 }
Christian Borntraeger59674c12012-01-11 11:20:33 +01001206 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001207 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001208 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001209}
1210
/*
 * Called when this vcpu is scheduled out: the mirror image of
 * kvm_arch_vcpu_load.  Clear RUNNING, disable the guest address space,
 * save the guest's FP/vector and access registers and restore the
 * host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1230
/*
 * Reset a vcpu to its architectural initial state: clear PSW, prefix,
 * timers, control and FP registers, drop pending async page faults and
 * local interrupts.  Unless user space controls cpu states itself, the
 * vcpu is also put into the stopped state.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* Architected reset values for CR0 and CR14. */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* Also reset the hardware FPC, not just the saved copy. */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1253
Dominik Dingel31928aa2014-12-04 15:47:07 +01001254void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001255{
Jason J. Herne72f25022014-11-25 09:46:02 -05001256 mutex_lock(&vcpu->kvm->lock);
1257 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1258 mutex_unlock(&vcpu->kvm->lock);
Dominik Dingeldafd0322014-12-02 16:53:21 +01001259 if (!kvm_is_ucontrol(vcpu->kvm))
1260 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001261}
1262
Tony Krowiak5102ee82014-06-27 14:46:01 -04001263static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1264{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001265 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001266 return;
1267
Tony Krowiaka374e892014-09-03 10:13:53 +02001268 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1269
1270 if (vcpu->kvm->arch.crypto.aes_kw)
1271 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1272 if (vcpu->kvm->arch.crypto.dea_kw)
1273 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1274
Tony Krowiak5102ee82014-06-27 14:46:01 -04001275 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1276}
1277
Dominik Dingelb31605c2014-03-25 13:47:11 +01001278void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1279{
1280 free_page(vcpu->arch.sie_block->cbrlo);
1281 vcpu->arch.sie_block->cbrlo = 0;
1282}
1283
/*
 * Allocate the collaborative-memory-management bitmap page (cbrlo) and
 * flip the related ecb2 control bits (0x80 set, 0x08 cleared --
 * presumably CMMA enable and a conflicting mode bit; confirm against
 * the SIE block definition).  Returns 0 or -ENOMEM.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1294
Michael Mueller91520f12015-02-27 14:32:11 +01001295static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1296{
1297 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1298
1299 vcpu->arch.cpu_id = model->cpu_id;
1300 vcpu->arch.sie_block->ibc = model->ibc;
1301 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1302}
1303
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001304int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1305{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001306 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001307
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001308 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1309 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +02001310 CPUSTAT_STOPPED |
1311 CPUSTAT_GED);
Michael Mueller91520f12015-02-27 14:32:11 +01001312 kvm_s390_vcpu_setup_model(vcpu);
1313
Christian Borntraegerfc345312010-06-17 23:16:20 +02001314 vcpu->arch.sie_block->ecb = 6;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001315 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001316 vcpu->arch.sie_block->ecb |= 0x10;
1317
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +02001318 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001319 vcpu->arch.sie_block->eca = 0xC1002000U;
Heiko Carstens217a4402013-12-30 12:54:14 +01001320 if (sclp_has_siif())
1321 vcpu->arch.sie_block->eca |= 1;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001322 if (sclp_has_sigpif())
1323 vcpu->arch.sie_block->eca |= 0x10000000U;
Michael Mueller18280d82015-03-16 16:05:41 +01001324 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman13211ea2014-04-30 13:39:46 -04001325 vcpu->arch.sie_block->eca |= 0x00020000;
1326 vcpu->arch.sie_block->ecd |= 0x20000000;
1327 }
Thomas Huth492d8642015-02-10 16:11:01 +01001328 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001329
Dominik Dingelb31605c2014-03-25 13:47:11 +01001330 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1331 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1332 if (rc)
1333 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001334 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001335 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001336 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001337
Tony Krowiak5102ee82014-06-27 14:46:01 -04001338 kvm_s390_vcpu_crypto_setup(vcpu);
1339
Dominik Dingelb31605c2014-03-25 13:47:11 +01001340 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001341}
1342
1343struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1344 unsigned int id)
1345{
Carsten Otte4d475552011-10-18 12:27:12 +02001346 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001347 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001348 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001349
Carsten Otte4d475552011-10-18 12:27:12 +02001350 if (id >= KVM_MAX_VCPUS)
1351 goto out;
1352
1353 rc = -ENOMEM;
1354
Michael Muellerb110fea2013-06-12 13:54:54 +02001355 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001356 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001357 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001358
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001359 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1360 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001361 goto out_free_cpu;
1362
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001363 vcpu->arch.sie_block = &sie_page->sie_block;
1364 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
Eric Farman68c55752014-06-09 10:57:26 -04001365 vcpu->arch.host_vregs = &sie_page->vregs;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001366
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001367 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001368 if (!kvm_is_ucontrol(kvm)) {
1369 if (!kvm->arch.sca) {
1370 WARN_ON_ONCE(1);
1371 goto out_free_cpu;
1372 }
1373 if (!kvm->arch.sca->cpu[id].sda)
1374 kvm->arch.sca->cpu[id].sda =
1375 (__u64) vcpu->arch.sie_block;
1376 vcpu->arch.sie_block->scaoh =
1377 (__u32)(((__u64)kvm->arch.sca) >> 32);
1378 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1379 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1380 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001381
Carsten Otteba5c1e92008-03-25 18:47:26 +01001382 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001383 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001384 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001385 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001386
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001387 rc = kvm_vcpu_init(vcpu, kvm, id);
1388 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001389 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001390 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1391 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001392 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001393
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001394 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001395out_free_sie_block:
1396 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001397out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001398 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001399out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001400 return ERR_PTR(rc);
1401}
1402
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001403int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1404{
David Hildenbrand9a022062014-08-05 17:40:47 +02001405 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001406}
1407
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001408void s390_vcpu_block(struct kvm_vcpu *vcpu)
1409{
1410 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1411}
1412
/* Allow this vcpu to enter SIE again; counterpart of s390_vcpu_block. */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1417
1418/*
1419 * Kick a guest cpu out of SIE and wait until SIE is not running.
1420 * If the CPU is not running (e.g. waiting as idle) the function will
1421 * return immediately. */
1422void exit_sie(struct kvm_vcpu *vcpu)
1423{
1424 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1425 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1426 cpu_relax();
1427}
1428
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* Block first so the vcpu cannot slip back into SIE after the kick. */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1435
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001436static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1437{
1438 int i;
1439 struct kvm *kvm = gmap->private;
1440 struct kvm_vcpu *vcpu;
1441
1442 kvm_for_each_vcpu(i, vcpu, kvm) {
1443 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +02001444 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001445 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1446 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
1447 exit_sie_sync(vcpu);
1448 }
1449 }
1450}
1451
Christoffer Dallb6d33832012-03-08 16:44:24 -05001452int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1453{
1454 /* kvm common code refers to this, but never calls it */
1455 BUG();
1456 return 0;
1457}
1458
Carsten Otte14eebd92012-05-15 14:15:26 +02001459static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1460 struct kvm_one_reg *reg)
1461{
1462 int r = -EINVAL;
1463
1464 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001465 case KVM_REG_S390_TODPR:
1466 r = put_user(vcpu->arch.sie_block->todpr,
1467 (u32 __user *)reg->addr);
1468 break;
1469 case KVM_REG_S390_EPOCHDIFF:
1470 r = put_user(vcpu->arch.sie_block->epoch,
1471 (u64 __user *)reg->addr);
1472 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001473 case KVM_REG_S390_CPU_TIMER:
1474 r = put_user(vcpu->arch.sie_block->cputm,
1475 (u64 __user *)reg->addr);
1476 break;
1477 case KVM_REG_S390_CLOCK_COMP:
1478 r = put_user(vcpu->arch.sie_block->ckc,
1479 (u64 __user *)reg->addr);
1480 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001481 case KVM_REG_S390_PFTOKEN:
1482 r = put_user(vcpu->arch.pfault_token,
1483 (u64 __user *)reg->addr);
1484 break;
1485 case KVM_REG_S390_PFCOMPARE:
1486 r = put_user(vcpu->arch.pfault_compare,
1487 (u64 __user *)reg->addr);
1488 break;
1489 case KVM_REG_S390_PFSELECT:
1490 r = put_user(vcpu->arch.pfault_select,
1491 (u64 __user *)reg->addr);
1492 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001493 case KVM_REG_S390_PP:
1494 r = put_user(vcpu->arch.sie_block->pp,
1495 (u64 __user *)reg->addr);
1496 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001497 case KVM_REG_S390_GBEA:
1498 r = put_user(vcpu->arch.sie_block->gbea,
1499 (u64 __user *)reg->addr);
1500 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001501 default:
1502 break;
1503 }
1504
1505 return r;
1506}
1507
/*
 * KVM_SET_ONE_REG: set a single architecture register from the user
 * buffer at reg->addr.  Mirror of kvm_arch_vcpu_ioctl_get_one_reg.
 * Returns 0 on success, -EFAULT from get_user, or -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* Writing the invalid token also cancels queued async pfs. */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001558
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001559static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1560{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001561 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001562 return 0;
1563}
1564
/* Set the general purpose registers from user-provided kvm_regs. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1570
/* Read the general purpose registers into user-provided kvm_regs. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1576
/*
 * Set access and control registers.  Access registers are additionally
 * loaded into the hardware right away so the running context sees them.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1585
/* Read access registers and control registers out to userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1593
/*
 * Set the guest floating point registers and FP control from userspace.
 * Returns -EINVAL for an invalid FP control value, 0 otherwise.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* validate the fpc before touching any vcpu state */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* make the new values effective in the hardware registers */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1604
/* Read the guest floating point registers and FP control to userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1611
1612static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1613{
1614 int rc = 0;
1615
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001616 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001617 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001618 else {
1619 vcpu->run->psw_mask = psw.mask;
1620 vcpu->run->psw_addr = psw.addr;
1621 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001622 return rc;
1623}
1624
/* KVM_TRANSLATE ioctl: address translation is not implemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1630
/* guest-debug control flags userspace may pass; everything else is rejected */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG ioctl: enable or disable guest debugging.
 * Returns 0 on success, -EINVAL for unknown flags or on breakpoint
 * import failure (via kvm_s390_import_bp_data).
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* always start from a clean state: no flags, no breakpoint data */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		/* debugging disabled: drop PER enforcement and bp history */
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoints failed - undo everything */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1666
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001667int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1668 struct kvm_mp_state *mp_state)
1669{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001670 /* CHECK_STOP and LOAD are not supported yet */
1671 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1672 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001673}
1674
1675int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1676 struct kvm_mp_state *mp_state)
1677{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001678 int rc = 0;
1679
1680 /* user space knows about this interface - let it control the state */
1681 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1682
1683 switch (mp_state->mp_state) {
1684 case KVM_MP_STATE_STOPPED:
1685 kvm_s390_vcpu_stop(vcpu);
1686 break;
1687 case KVM_MP_STATE_OPERATING:
1688 kvm_s390_vcpu_start(vcpu);
1689 break;
1690 case KVM_MP_STATE_LOAD:
1691 case KVM_MP_STATE_CHECK_STOP:
1692 /* fall through - CHECK_STOP and LOAD are not supported yet */
1693 default:
1694 rc = -ENXIO;
1695 }
1696
1697 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001698}
1699
Dominik Dingelb31605c2014-03-25 13:47:11 +01001700bool kvm_s390_cmma_enabled(struct kvm *kvm)
1701{
1702 if (!MACHINE_IS_LPAR)
1703 return false;
1704 /* only enable for z10 and later */
1705 if (!MACHINE_HAS_EDAT1)
1706 return false;
1707 if (!kvm->arch.use_cmma)
1708 return false;
1709 return true;
1710}
1711
/* Is the interlock-and-broadcast-suppression facility active for this vcpu? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1716
/*
 * Process all pending vcpu requests before (re)entering SIE. Any request
 * that is handled restarts the loop, so that a request raised while we
 * were working is not missed. Returns 0 on success or a negative error
 * code (from gmap_ipte_notify) that aborts the run loop.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* 0xffff marks the guest TLB as invalid for the next entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1766
Thomas Huthfa576c52014-05-06 17:20:16 +02001767/**
1768 * kvm_arch_fault_in_page - fault-in guest page if necessary
1769 * @vcpu: The corresponding virtual cpu
1770 * @gpa: Guest physical address
1771 * @writable: Whether the page should be writable or not
1772 *
1773 * Make sure that a guest page has been faulted-in on the host.
1774 *
1775 * Return: Zero on success, negative error code otherwise.
1776 */
1777long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001778{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001779 return gmap_fault(vcpu->arch.gmap, gpa,
1780 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001781}
1782
/*
 * Inject a pfault notification into the guest: a PFAULT_INIT interrupt
 * on the vcpu when the async work starts, or a PFAULT_DONE interrupt on
 * the VM when it completes. @token identifies the outstanding fault.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		/* injection should never fail here; warn once if it does */
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1799
/* Async-pf callback: tell the guest that a page is not yet present. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1806
/* Async-pf callback: tell the guest that a pending page is now present. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1813
/* Async-pf callback: nothing to do on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1819
/* Async-pf callback: always report "can inject" so cleanup runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1828
/*
 * Try to turn the current host page fault into an async pfault for the
 * guest. Returns 0 when async handling is not possible/allowed (the
 * caller then falls back to a synchronous fault-in), otherwise the
 * return value of kvm_setup_async_pf.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handshake must have been enabled by the guest */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* guest PSW must match the mask/compare pair set via PFAULT */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* the guest must be able to take the completion interrupt */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* 0x200 in cr0 gates the pfault external interrupt subclass */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* fetch the token from guest real memory; bail out on failure */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1857
/*
 * Prepare the vcpu for (re)entering SIE: handle completed pfaults,
 * machine checks, pending interrupts and vcpu requests, and arm
 * guest-debug state. Returns 0 when SIE may be entered, a negative
 * error code otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 in the sie block shadow gprs 14 and 15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests deliver their own interrupts in userspace */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1899
/*
 * Handle a fault that happened while running the SIE instruction itself:
 * forward the guest PSW past the faulting instruction and inject an
 * addressing exception. Returns the result of the program injection.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	/* negative length rewinds backwards, i.e. advances the PSW */
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
1924
/*
 * Post-process a SIE exit. @exit_reason >= 0 means a regular intercept;
 * negative values indicate a host-side fault while in SIE. Returns 0 to
 * continue the run loop, or a negative code to exit to userspace.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;	/* -1 == "not handled yet" sentinel */

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let userspace resolve the fault for ucontrol guests */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* guest page fault: try async pfault, else fault-in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	/* still unhandled: the fault hit the SIE instruction itself */
	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1971
/*
 * The main run loop: repeatedly prepare, enter SIE and post-process
 * until an error, a pending signal or a guest-debug exit stops it.
 * Returns the final rc from pre/post processing.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs; re-acquire afterwards */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2006
/*
 * Pull register state that userspace marked dirty in kvm_run into the
 * vcpu before entering the guest. Counterpart of store_regs().
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* invalid token disables pfault: drop queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
2034
/*
 * Publish the vcpu's register state back into kvm_run for userspace
 * after a guest exit. Counterpart of sync_regs().
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2050
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002051int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2052{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002053 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002054 sigset_t sigsaved;
2055
David Hildenbrand27291e22014-01-23 12:26:52 +01002056 if (guestdbg_exit_pending(vcpu)) {
2057 kvm_s390_prepare_debug_exit(vcpu);
2058 return 0;
2059 }
2060
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002061 if (vcpu->sigset_active)
2062 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2063
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002064 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2065 kvm_s390_vcpu_start(vcpu);
2066 } else if (is_vcpu_stopped(vcpu)) {
2067 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
2068 vcpu->vcpu_id);
2069 return -EINVAL;
2070 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002071
David Hildenbrandb028ee32014-07-17 10:47:43 +02002072 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002073
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002074 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002075 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002076
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002077 if (signal_pending(current) && !rc) {
2078 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002079 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002080 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002081
David Hildenbrand27291e22014-01-23 12:26:52 +01002082 if (guestdbg_exit_pending(vcpu) && !rc) {
2083 kvm_s390_prepare_debug_exit(vcpu);
2084 rc = 0;
2085 }
2086
Heiko Carstensb8e660b2010-02-26 22:37:41 +01002087 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002088 /* intercept cannot be handled in-kernel, prepare kvm-run */
2089 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2090 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002091 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2092 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2093 rc = 0;
2094 }
2095
2096 if (rc == -EREMOTE) {
2097 /* intercept was handled, but userspace support is needed
2098 * kvm_run has been prepared by the handler */
2099 rc = 0;
2100 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002101
David Hildenbrandb028ee32014-07-17 10:47:43 +02002102 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002103
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002104 if (vcpu->sigset_active)
2105 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2106
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002107 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002108 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002109}
2110
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	/* resolve the two magic addresses and flag ESA/390-vs-z mode at 163 */
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* write each save-area field; errors are OR-ed and reported once */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* clock comparator is stored shifted right by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2158
/* Refresh the lazily-held FP/access registers, then store status. */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2172
/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;	/* no usable address bits set - silently skip */

	/* store the vector registers (512 bytes) at the aligned address */
	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
2186
/* Store the additional (vector) status if facility 129 is available. */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* facility 129 == vector support; nothing to store without it */
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2201
/* Queue a DISABLE_IBS request (cancelling any pending ENABLE) and kick
 * the vcpu out of SIE so it is processed. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
2208
2209static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2210{
2211 unsigned int i;
2212 struct kvm_vcpu *vcpu;
2213
2214 kvm_for_each_vcpu(i, vcpu, kvm) {
2215 __disable_ibs_on_vcpu(vcpu);
2216 }
2217}
2218
/* Queue an ENABLE_IBS request (cancelling any pending DISABLE) and kick
 * the vcpu out of SIE so it is processed. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
2225
/*
 * Move a stopped vcpu into the OPERATING state, managing the IBS
 * facility: a lone running vcpu gets IBS enabled, while starting a
 * second vcpu forces IBS off on all of them.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count how many vcpus are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * oustanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2264
/*
 * Move a running vcpu into the STOPPED state. If exactly one vcpu
 * remains running afterwards, enable IBS on it to speed it up.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find out whether a single vcpu is left running, and which one */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2302
Cornelia Huckd6712df2012-12-20 15:32:11 +01002303static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2304 struct kvm_enable_cap *cap)
2305{
2306 int r;
2307
2308 if (cap->flags)
2309 return -EINVAL;
2310
2311 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002312 case KVM_CAP_S390_CSS_SUPPORT:
2313 if (!vcpu->kvm->arch.css_support) {
2314 vcpu->kvm->arch.css_support = 1;
2315 trace_kvm_s390_enable_css(vcpu->kvm);
2316 }
2317 r = 0;
2318 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002319 default:
2320 r = -EINVAL;
2321 break;
2322 }
2323 return r;
2324}
2325
/*
 * KVM_S390_MEM_OP ioctl: read from or write to guest logical memory on
 * behalf of userspace, optionally only checking the access
 * (KVM_S390_MEMOP_F_CHECK_ONLY) and optionally injecting the resulting
 * program exception into the guest (KVM_S390_MEMOP_F_INJECT_EXCEPTION).
 * Returns 0 on success, a positive PGM code on a guest access error,
 * or a negative errno.
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* a bounce buffer is only needed when data is actually moved */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* hold srcu so memslots stay stable during the guest access */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 is a guest PGM code; forward it to the guest if requested */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2384
/*
 * Top-level dispatcher for vcpu-fd ioctls on s390.
 *
 * Note the mixed error conventions below are part of the established
 * behavior: most cases set 'r' and break, but a malformed
 * KVM_S390_INTERRUPT payload returns -EINVAL directly.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		/* Convert the legacy interrupt layout; reject malformed input. */
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* Hold SRCU: storing status writes into guest memory slots. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* UCAS mappings only make sense for user-controlled VMs. */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Resolve a guest-address fault on behalf of userspace. */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	default:
		/* -ENOTTY is the canonical "unknown ioctl" return value. */
		r = -ENOTTY;
	}
	return r;
}
2498
Carsten Otte5b1c1492012-01-04 10:25:23 +01002499int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2500{
2501#ifdef CONFIG_KVM_S390_UCONTROL
2502 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2503 && (kvm_is_ucontrol(vcpu->kvm))) {
2504 vmf->page = virt_to_page(vcpu->arch.sie_block);
2505 get_page(vmf->page);
2506 return 0;
2507 }
2508#endif
2509 return VM_FAULT_SIGBUS;
2510}
2511
/* s390 keeps no extra per-memslot metadata, so there is nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2517
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002518/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002519int kvm_arch_prepare_memory_region(struct kvm *kvm,
2520 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002521 struct kvm_userspace_memory_region *mem,
2522 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002523{
Nick Wangdd2887e2013-03-25 17:22:57 +01002524 /* A few sanity checks. We can have memory slots which have to be
2525 located/ended at a segment boundary (1MB). The memory in userland is
2526 ok to be fragmented into various different vmas. It is okay to mmap()
2527 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002528
Carsten Otte598841c2011-07-24 10:48:21 +02002529 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002530 return -EINVAL;
2531
Carsten Otte598841c2011-07-24 10:48:21 +02002532 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002533 return -EINVAL;
2534
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002535 return 0;
2536}
2537
2538void kvm_arch_commit_memory_region(struct kvm *kvm,
2539 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002540 const struct kvm_memory_slot *old,
2541 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002542{
Carsten Ottef7850c92011-07-24 10:48:23 +02002543 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002544
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002545 /* If the basics of the memslot do not change, we do not want
2546 * to update the gmap. Every update causes several unnecessary
2547 * segment translation exceptions. This is usually handled just
2548 * fine by the normal fault handler + gmap, but it will also
2549 * cause faults on the prefix page of running guest CPUs.
2550 */
2551 if (old->userspace_addr == mem->userspace_addr &&
2552 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2553 old->npages * PAGE_SIZE == mem->memory_size)
2554 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002555
2556 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2557 mem->guest_phys_addr, mem->memory_size);
2558 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02002559 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002560 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002561}
2562
/* Module entry point: register this architecture's KVM implementation. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2567
/* Module exit point: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2572
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module via the /dev/kvm misc device.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");