blob: 3acf08ba88e4ffecce9b0dbaa32f438d999543ae [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010028#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010029#include <asm/lowcore.h>
30#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010031#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010032#include <asm/switch_to.h>
Michael Mueller78c4b59f2013-07-26 15:04:04 +020033#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020034#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010035#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010036#include "gaccess.h"
37
Cornelia Huck5786fff2012-07-23 17:20:29 +020038#define CREATE_TRACE_POINTS
39#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020040#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020041
/* Expand a vcpu stat name into its offset inside struct kvm_vcpu for debugfs. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Per-vcpu statistic counters exported through the KVM debugfs interface. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
102
/* Facility bit list used when checking virtual-machine facilities. */
unsigned long *vfacilities;
/* Notifier registered in kvm_arch_hardware_setup() for gmap ipte events. */
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
111
/* Section: not file related */
/* Per-cpu hardware enable hook; nothing to do on s390. */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
118
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * Global hardware setup: register the gmap ipte notifier so that gmap
 * invalidation events are delivered to kvm_gmap_notifier().
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
127
/* Undo kvm_arch_hardware_setup(): drop the gmap ipte notifier again. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
132
/* Module-init time arch hook; @opaque is unused on s390. */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
138
/* Section: device related */
/*
 * Ioctls on the /dev/kvm fd itself.  The only s390-specific one is
 * KVM_S390_ENABLE_SIE, which is delegated to s390_enable_sie().
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
147
Alexander Graf784aa3d2014-07-14 18:27:35 +0200148int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100149{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100150 int r;
151
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200152 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100153 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200154 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100155 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100156#ifdef CONFIG_KVM_S390_UCONTROL
157 case KVM_CAP_S390_UCONTROL:
158#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200159 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100160 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200161 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100162 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100163 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200164 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100165 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200166 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200167 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200168 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200169 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200170 case KVM_CAP_MP_STATE:
David Hildenbrand2444b352014-10-09 14:10:13 +0200171 case KVM_CAP_S390_USER_SIGP:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100172 r = 1;
173 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200174 case KVM_CAP_NR_VCPUS:
175 case KVM_CAP_MAX_VCPUS:
176 r = KVM_MAX_VCPUS;
177 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100178 case KVM_CAP_NR_MEMSLOTS:
179 r = KVM_USER_MEM_SLOTS;
180 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200181 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100182 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200183 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200184 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100185 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200186 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100187 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100188}
189
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400190static void kvm_s390_sync_dirty_log(struct kvm *kvm,
191 struct kvm_memory_slot *memslot)
192{
193 gfn_t cur_gfn, last_gfn;
194 unsigned long address;
195 struct gmap *gmap = kvm->arch.gmap;
196
197 down_read(&gmap->mm->mmap_sem);
198 /* Loop over all guest pages */
199 last_gfn = memslot->base_gfn + memslot->npages;
200 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
201 address = gfn_to_hva_memslot(memslot, cur_gfn);
202
203 if (gmap_test_and_clear_dirty(address, gmap))
204 mark_page_dirty(kvm, cur_gfn);
205 }
206 up_read(&gmap->mm->mmap_sem);
207}
208
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100209/* Section: vm related */
210/*
211 * Get (and clear) the dirty memory log for a memory slot.
212 */
213int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
214 struct kvm_dirty_log *log)
215{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400216 int r;
217 unsigned long n;
218 struct kvm_memory_slot *memslot;
219 int is_dirty = 0;
220
221 mutex_lock(&kvm->slots_lock);
222
223 r = -EINVAL;
224 if (log->slot >= KVM_USER_MEM_SLOTS)
225 goto out;
226
227 memslot = id_to_memslot(kvm->memslots, log->slot);
228 r = -ENOENT;
229 if (!memslot->dirty_bitmap)
230 goto out;
231
232 kvm_s390_sync_dirty_log(kvm, memslot);
233 r = kvm_get_dirty_log(kvm, log, &is_dirty);
234 if (r)
235 goto out;
236
237 /* Clear the dirty log */
238 if (is_dirty) {
239 n = kvm_dirty_bitmap_bytes(memslot);
240 memset(memslot->dirty_bitmap, 0, n);
241 }
242 r = 0;
243out:
244 mutex_unlock(&kvm->slots_lock);
245 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100246}
247
Cornelia Huckd938dc52013-10-23 18:26:34 +0200248static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249{
250 int r;
251
252 if (cap->flags)
253 return -EINVAL;
254
255 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200256 case KVM_CAP_S390_IRQCHIP:
257 kvm->arch.use_irqchip = 1;
258 r = 0;
259 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200260 case KVM_CAP_S390_USER_SIGP:
261 kvm->arch.user_sigp = 1;
262 r = 0;
263 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200264 default:
265 r = -EINVAL;
266 break;
267 }
268 return r;
269}
270
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100271static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
272{
273 int ret;
274
275 switch (attr->attr) {
276 case KVM_S390_VM_MEM_LIMIT_SIZE:
277 ret = 0;
278 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
279 ret = -EFAULT;
280 break;
281 default:
282 ret = -ENXIO;
283 break;
284 }
285 return ret;
286}
287
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute: enable CMMA, reset CMMA state,
 * or shrink the guest memory limit by replacing the VM's gmap.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* CMMA can only be switched on while no vcpu exists yet. */
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		/* Reset CMMA state for the whole address space under srcu. */
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol VMs manage their own gmaps, no global limit. */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* The limit can only be lowered, never raised. */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* Swap in the smaller gmap for the old one. */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
346
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

/*
 * Enable or disable AES/DEA protected-key wrapping.  Enabling draws a fresh
 * random wrapping key mask into the crycb; disabling clears the mask.  Every
 * vcpu is re-configured and kicked out of SIE so the change takes effect.
 * Requires facility 76 (checked via test_vfacility).
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_vfacility(76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* Propagate the new crycb settings to all vcpus. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
393
Jason J. Herne72f25022014-11-25 09:46:02 -0500394static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
395{
396 u8 gtod_high;
397
398 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
399 sizeof(gtod_high)))
400 return -EFAULT;
401
402 if (gtod_high != 0)
403 return -EINVAL;
404
405 return 0;
406}
407
/*
 * Set the guest TOD clock: record the difference between the requested
 * guest TOD and the host TOD as the VM epoch, then push the new epoch to
 * every vcpu's SIE block.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		/* Kick the vcpu out of SIE so it picks up the new epoch. */
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
431
432static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
433{
434 int ret;
435
436 if (attr->flags)
437 return -EINVAL;
438
439 switch (attr->attr) {
440 case KVM_S390_VM_TOD_HIGH:
441 ret = kvm_s390_set_tod_high(kvm, attr);
442 break;
443 case KVM_S390_VM_TOD_LOW:
444 ret = kvm_s390_set_tod_low(kvm, attr);
445 break;
446 default:
447 ret = -ENXIO;
448 break;
449 }
450 return ret;
451}
452
453static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
454{
455 u8 gtod_high = 0;
456
457 if (copy_to_user((void __user *)attr->addr, &gtod_high,
458 sizeof(gtod_high)))
459 return -EFAULT;
460
461 return 0;
462}
463
464static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
465{
466 u64 host_tod, gtod;
467 int r;
468
469 r = store_tod_clock(&host_tod);
470 if (r)
471 return r;
472
473 gtod = host_tod + kvm->arch.epoch;
474 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
475 return -EFAULT;
476
477 return 0;
478}
479
480static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
481{
482 int ret;
483
484 if (attr->flags)
485 return -EINVAL;
486
487 switch (attr->attr) {
488 case KVM_S390_VM_TOD_HIGH:
489 ret = kvm_s390_get_tod_high(kvm, attr);
490 break;
491 case KVM_S390_VM_TOD_LOW:
492 ret = kvm_s390_get_tod_low(kvm, attr);
493 break;
494 default:
495 ret = -ENXIO;
496 break;
497 }
498 return ret;
499}
500
Dominik Dingelf2061652014-04-09 13:13:00 +0200501static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
502{
503 int ret;
504
505 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200506 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100507 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200508 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500509 case KVM_S390_VM_TOD:
510 ret = kvm_s390_set_tod(kvm, attr);
511 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200512 case KVM_S390_VM_CRYPTO:
513 ret = kvm_s390_vm_set_crypto(kvm, attr);
514 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200515 default:
516 ret = -ENXIO;
517 break;
518 }
519
520 return ret;
521}
522
523static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
524{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100525 int ret;
526
527 switch (attr->group) {
528 case KVM_S390_VM_MEM_CTRL:
529 ret = kvm_s390_get_mem_control(kvm, attr);
530 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500531 case KVM_S390_VM_TOD:
532 ret = kvm_s390_get_tod(kvm, attr);
533 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100534 default:
535 ret = -ENXIO;
536 break;
537 }
538
539 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200540}
541
542static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
543{
544 int ret;
545
546 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200547 case KVM_S390_VM_MEM_CTRL:
548 switch (attr->attr) {
549 case KVM_S390_VM_MEM_ENABLE_CMMA:
550 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100551 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200552 ret = 0;
553 break;
554 default:
555 ret = -ENXIO;
556 break;
557 }
558 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500559 case KVM_S390_VM_TOD:
560 switch (attr->attr) {
561 case KVM_S390_VM_TOD_LOW:
562 case KVM_S390_VM_TOD_HIGH:
563 ret = 0;
564 break;
565 default:
566 ret = -ENXIO;
567 break;
568 }
569 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200570 case KVM_S390_VM_CRYPTO:
571 switch (attr->attr) {
572 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
573 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
574 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
575 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
576 ret = 0;
577 break;
578 default:
579 ret = -ENXIO;
580 break;
581 }
582 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200583 default:
584 ret = -ENXIO;
585 break;
586 }
587
588 return ret;
589}
590
/*
 * VM-fd ioctls: floating interrupt injection, capability enablement,
 * (dummy) irqchip creation and the device-attribute set/get/has calls.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* Only valid once the s390 irqchip has been enabled. */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
656
/*
 * Allocate and initialize the crypto control block (crycb) when facility
 * 76 is available; otherwise crypto stays unconfigured and 0 is returned.
 * Returns -ENOMEM if the crycb cannot be allocated.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	/* GFP_DMA: the crycb address is handed to hardware below. */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	/* crycb descriptor: origin address combined with the format-1 bit. */
	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}
676
/*
 * Create the architecture-specific part of a VM.  @type may request a
 * ucontrol VM (KVM_VM_S390_UCONTROL, requires CAP_SYS_ADMIN) when built
 * with CONFIG_KVM_S390_UCONTROL; otherwise it must be 0.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Offset successive SCAs within their page in 16-byte steps
	 * (cycling through 0..0x7f0) so different VMs don't contend on
	 * the same cache lines.  NOTE(review): assumes struct sca_block
	 * still fits in the page at any of these offsets -- confirm.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs get their gmaps per vcpu, not per VM. */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
	/* Error unwind: free in reverse order of allocation. */
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
751
/* Tear down the architecture-specific state of one vcpu. */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* Remove this vcpu from the shared SCA: clear its mcn bit
		 * and its sda entry (only if it still points at our
		 * sie_block). */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	/* ucontrol vcpus own a private gmap (see __kvm_ucontrol_vcpu_init). */
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
777
/* Destroy every vcpu of the VM and clear the vcpu bookkeeping. */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
793
/* Free all architecture resources of a VM: vcpus, SCA page, debug
 * feature, crycb, gmap (non-ucontrol only), adapters and floating irqs. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
805
/* Section: vcpu related */
/* Allocate the private per-vcpu gmap used by ucontrol VMs. */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
816
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* no pfault handshake is pending yet */
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* register sets that are synced with user space via kvm_run */
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	/* ucontrol vcpus get their own gmap instead of the VM-wide one */
	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
833
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/*
	 * Save the host FP control, FP registers and access registers,
	 * then install the guest's copies; kvm_arch_vcpu_put() reverses
	 * this.  The save/restore order must not change.
	 */
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* activate the guest address space on this cpu */
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
845
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* mirror image of kvm_arch_vcpu_load(): stash the guest context
	 * and bring back the host FP and access registers */
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
857
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* CR0/CR14 initial values as mandated by the reset in POP */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also load the cleared fpc into the hardware register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	/* drop any in-flight pfault handshake */
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if user space does not control the cpu state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
880
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * Inherit the VM-wide TOD epoch; kvm->lock keeps it stable
	 * while we copy it into the sie block.
	 */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	/* ucontrol vcpus already got a private gmap in vcpu_init */
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
889
Tony Krowiak5102ee82014-06-27 14:46:01 -0400890static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
891{
892 if (!test_vfacility(76))
893 return;
894
Tony Krowiaka374e892014-09-03 10:13:53 +0200895 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
896
897 if (vcpu->kvm->arch.crypto.aes_kw)
898 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
899 if (vcpu->kvm->arch.crypto.dea_kw)
900 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
901
Tony Krowiak5102ee82014-06-27 14:46:01 -0400902 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
903}
904
/* Release the CMMA collaborative-memory block-usage list (cbrlo) page. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
910
911int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
912{
913 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
914 if (!vcpu->arch.sie_block->cbrlo)
915 return -ENOMEM;
916
917 vcpu->arch.sie_block->ecb2 |= 0x80;
918 vcpu->arch.sie_block->ecb2 &= ~0x08;
919 return 0;
920}
921
/*
 * Program the vcpu's SIE control block: cpu state flags, execution
 * control bits, facility list, instruction intercepts, optional CMMA,
 * the clock-comparator wakeup timer and the crypto setup.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* NOTE(review): 0x10 appears to enable transactional execution,
	 * gated on facilities 50 and 73 -- confirm against SIE docs */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* intercept these instructions so KVM can handle them itself */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer used to wake the vcpu when its clock comparator fires */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
958
959struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
960 unsigned int id)
961{
Carsten Otte4d475552011-10-18 12:27:12 +0200962 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200963 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200964 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100965
Carsten Otte4d475552011-10-18 12:27:12 +0200966 if (id >= KVM_MAX_VCPUS)
967 goto out;
968
969 rc = -ENOMEM;
970
Michael Muellerb110fea2013-06-12 13:54:54 +0200971 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100972 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200973 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100974
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200975 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
976 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100977 goto out_free_cpu;
978
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200979 vcpu->arch.sie_block = &sie_page->sie_block;
980 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
981
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100982 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100983 if (!kvm_is_ucontrol(kvm)) {
984 if (!kvm->arch.sca) {
985 WARN_ON_ONCE(1);
986 goto out_free_cpu;
987 }
988 if (!kvm->arch.sca->cpu[id].sda)
989 kvm->arch.sca->cpu[id].sda =
990 (__u64) vcpu->arch.sie_block;
991 vcpu->arch.sie_block->scaoh =
992 (__u32)(((__u64)kvm->arch.sca) >> 32);
993 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
994 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
995 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100996
Carsten Otteba5c1e92008-03-25 18:47:26 +0100997 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100998 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200999 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001000 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001001
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001002 rc = kvm_vcpu_init(vcpu, kvm, id);
1003 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001004 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001005 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1006 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001007 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001008
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001009 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001010out_free_sie_block:
1011 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001012out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001013 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001014out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001015 return ERR_PTR(rc);
1016}
1017
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1022
/* Prevent the vcpu from (re-)entering SIE (see also exit_sie_sync()). */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1027
/* Allow the vcpu to enter SIE again; pairs with s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1032
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the hardware clears the in-SIE indication */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1043
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1050
/*
 * gmap invalidation notifier: if the invalidated address hits a vcpu's
 * prefix area, force that vcpu out of SIE and request an MMU reload so
 * the prefix mapping is re-established before the next guest entry.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
1066
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1073
/*
 * KVM_GET_ONE_REG: copy a single s390 register value out to user
 * space.  Returns 0 on success, -EFAULT from put_user on a bad
 * address, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1122
/*
 * KVM_SET_ONE_REG: copy a single s390 register value in from user
 * space.  Returns 0 on success, -EFAULT from get_user on a bad
 * address, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* writing the invalid token cancels any pending pfaults */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001173
/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1179
/* KVM_SET_REGS: overwrite the guest general purpose registers. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1185
/* KVM_GET_REGS: read back the guest general purpose registers. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1191
/* KVM_SET_SREGS: set access and control registers.  The access regs
 * are loaded into hardware immediately since the vcpu may be running
 * on this cpu. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1200
1201int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1202 struct kvm_sregs *sregs)
1203{
Christian Borntraeger59674c12012-01-11 11:20:33 +01001204 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001205 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001206 return 0;
1207}
1208
/* KVM_SET_FPU: validate and install guest FP control and registers.
 * Returns -EINVAL for an invalid FP control word. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* reject before touching any state */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* load into hardware as well, in case the vcpu is loaded */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1219
/* KVM_GET_FPU: read back guest FP registers and FP control. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1226
1227static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1228{
1229 int rc = 0;
1230
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001231 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001232 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001233 else {
1234 vcpu->run->psw_mask = psw.mask;
1235 vcpu->run->psw_addr = psw.addr;
1236 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001237 return rc;
1238}
1239
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1245
/* guest-debug control flags currently supported on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable/disable guest debugging.  Enabling
 * forces guest PER (CPUSTAT_P) and optionally imports hardware
 * breakpoints; any failure rolls the debug state back completely.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* undo everything on failure */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1281
/* KVM_GET_MP_STATE: report STOPPED or OPERATING only. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
1289
/*
 * KVM_SET_MP_STATE: stop or start the vcpu.  First use of this ioctl
 * hands cpu state control over to user space for the whole VM.
 * Returns -ENXIO for states that are not supported (LOAD, CHECK_STOP).
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1314
Dominik Dingelb31605c2014-03-25 13:47:11 +01001315bool kvm_s390_cmma_enabled(struct kvm *kvm)
1316{
1317 if (!MACHINE_IS_LPAR)
1318 return false;
1319 /* only enable for z10 and later */
1320 if (!MACHINE_HAS_EDAT1)
1321 return false;
1322 if (!kvm->arch.use_cmma)
1323 return false;
1324 return true;
1325}
1326
/* Is the interlock-and-broadcast-suppression (IBS) flag set for this vcpu? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1331
/*
 * Process pending vcpu->requests before (re-)entering SIE: re-arm the
 * prefix-page ipte notifier, flush the TLB, toggle IBS.  Each handled
 * request restarts the loop because handling one request may raise or
 * depend on another.  Returns 0 or a negative error from
 * gmap_ipte_notify().
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the sie block's TLB association */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1381
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1397
/*
 * Inject a pseudo-page-fault notification into the guest.
 * @start_token: true injects PFAULT_INIT to this vcpu (work started),
 * false injects PFAULT_DONE as a floating interrupt (work finished).
 * @token: the pfault token that identifies the outstanding request.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1414
/* async_pf hook: tell the guest that a page is not yet present. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1421
/* async_pf hook: tell the guest that the faulted page is now present. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1428
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1434
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1443
/*
 * Try to set up an asynchronous host page fault (pfault) for the
 * current gmap fault address so the guest can keep running while the
 * host resolves it.  Returns 0 when async handling is not possible or
 * not allowed (invalid token, PSW mask mismatch, external interrupts
 * disabled, pending irq, pfault disabled in CR0 or the gmap, or the
 * guest token location is not readable); otherwise the result of
 * kvm_setup_async_pf().
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* guest-selected PSW bits must match for pfault to apply */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* CR0 bit 0x200 gates pfault in the guest -- see POP/CR layout */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* verify we can read the guest's pfault token */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1472
/*
 * Prepare a vcpu for entering SIE: complete async_pf housekeeping,
 * sync gprs 14/15 into the sie block, handle host machine checks,
 * deliver pending interrupts (non-ucontrol only), process vcpu
 * requests and activate guest debugging.  Returns 0 on success or a
 * negative error that aborts the run loop.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1514
/*
 * Post-process a SIE exit.  @exit_reason >= 0 means a regular
 * interception (handled below); negative means a host-side fault:
 * ucontrol faults are reported to user space, gmap pfaults are either
 * turned into async pfaults or faulted in synchronously, anything
 * else becomes an addressing exception for the guest.  Returns 0 to
 * continue the run loop, a negative error or -EREMOTE to leave it.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;	/* -1 == not handled yet */

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let user space resolve the fault */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			/* fall back to a synchronous fault-in */
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* sync gprs 14/15 back from the sie block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1564
/*
 * Main run loop: alternate between entering the guest via SIE and
 * post-processing the exits until an error, a pending signal, or a
 * pending guest-debug exit stops us.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest executes inside SIE */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1599
David Hildenbrandb028ee32014-07-17 10:47:43 +02001600static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1601{
1602 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1603 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1604 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1605 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1606 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1607 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001608 /* some control register changes require a tlb flush */
1609 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001610 }
1611 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1612 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1613 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1614 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1615 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1616 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1617 }
1618 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1619 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1620 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1621 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001622 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1623 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001624 }
1625 kvm_run->kvm_dirty_regs = 0;
1626}
1627
1628static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1629{
1630 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1631 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1632 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1633 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1634 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1635 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1636 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1637 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1638 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1639 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1640 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1641 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1642}
1643
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001644int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1645{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001646 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001647 sigset_t sigsaved;
1648
David Hildenbrand27291e22014-01-23 12:26:52 +01001649 if (guestdbg_exit_pending(vcpu)) {
1650 kvm_s390_prepare_debug_exit(vcpu);
1651 return 0;
1652 }
1653
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001654 if (vcpu->sigset_active)
1655 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1656
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001657 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1658 kvm_s390_vcpu_start(vcpu);
1659 } else if (is_vcpu_stopped(vcpu)) {
1660 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1661 vcpu->vcpu_id);
1662 return -EINVAL;
1663 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001664
David Hildenbrandb028ee32014-07-17 10:47:43 +02001665 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001666
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001667 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001668 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001669
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001670 if (signal_pending(current) && !rc) {
1671 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001672 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001673 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001674
David Hildenbrand27291e22014-01-23 12:26:52 +01001675 if (guestdbg_exit_pending(vcpu) && !rc) {
1676 kvm_s390_prepare_debug_exit(vcpu);
1677 rc = 0;
1678 }
1679
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001680 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001681 /* intercept cannot be handled in-kernel, prepare kvm-run */
1682 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1683 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001684 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1685 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1686 rc = 0;
1687 }
1688
1689 if (rc == -EREMOTE) {
1690 /* intercept was handled, but userspace support is needed
1691 * kvm_run has been prepared by the handler */
1692 rc = 0;
1693 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001694
David Hildenbrandb028ee32014-07-17 10:47:43 +02001695 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001696
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001697 if (vcpu->sigset_active)
1698 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1699
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001700 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001701 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001702}
1703
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	/* archmode == 1 marks the save area as z/Architecture format */
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag byte at absolute address 163, then the fixed base */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* flag byte at real address 163, base relative to prefix */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/*
	 * Store each register class at its architected offset in the save
	 * area; errors are OR-ed and collapsed into a single -EFAULT.
	 */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* clock comparator is stored shifted right by 8 bits */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1751
/*
 * Store the VCPU status to guest memory at @addr, first syncing the
 * live host FP/access register state back into the VCPU's copies.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1765
/*
 * Request that IBS (interlock busy state) be disabled on this VCPU.
 * A stale ENABLE request (if any) is cleared first; exit_sie_sync()
 * kicks the VCPU out of SIE so the request takes effect.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1772
1773static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1774{
1775 unsigned int i;
1776 struct kvm_vcpu *vcpu;
1777
1778 kvm_for_each_vcpu(i, vcpu, kvm) {
1779 __disable_ibs_on_vcpu(vcpu);
1780 }
1781}
1782
/*
 * Request that IBS be enabled on this VCPU.  A stale DISABLE request
 * (if any) is cleared first; exit_sie_sync() kicks the VCPU out of SIE
 * so the request takes effect.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1789
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001790void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1791{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001792 int i, online_vcpus, started_vcpus = 0;
1793
1794 if (!is_vcpu_stopped(vcpu))
1795 return;
1796
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001797 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001798 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001799 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001800 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1801
1802 for (i = 0; i < online_vcpus; i++) {
1803 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1804 started_vcpus++;
1805 }
1806
1807 if (started_vcpus == 0) {
1808 /* we're the only active VCPU -> speed it up */
1809 __enable_ibs_on_vcpu(vcpu);
1810 } else if (started_vcpus == 1) {
1811 /*
1812 * As we are starting a second VCPU, we have to disable
1813 * the IBS facility on all VCPUs to remove potentially
1814 * oustanding ENABLE requests.
1815 */
1816 __disable_ibs_on_all_vcpus(vcpu->kvm);
1817 }
1818
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001819 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001820 /*
1821 * Another VCPU might have used IBS while we were offline.
1822 * Let's play safe and flush the VCPU at startup.
1823 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001824 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001825 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001826 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001827}
1828
1829void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1830{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001831 int i, online_vcpus, started_vcpus = 0;
1832 struct kvm_vcpu *started_vcpu = NULL;
1833
1834 if (is_vcpu_stopped(vcpu))
1835 return;
1836
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001837 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001838 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001839 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001840 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1841
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001842 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02001843 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001844
David Hildenbrand6cddd432014-10-15 16:48:53 +02001845 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001846 __disable_ibs_on_vcpu(vcpu);
1847
1848 for (i = 0; i < online_vcpus; i++) {
1849 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1850 started_vcpus++;
1851 started_vcpu = vcpu->kvm->vcpus[i];
1852 }
1853 }
1854
1855 if (started_vcpus == 1) {
1856 /*
1857 * As we only have one VCPU left, we want to enable the
1858 * IBS facility for that VCPU to speed it up.
1859 */
1860 __enable_ibs_on_vcpu(started_vcpu);
1861 }
1862
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001863 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001864 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001865}
1866
Cornelia Huckd6712df2012-12-20 15:32:11 +01001867static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1868 struct kvm_enable_cap *cap)
1869{
1870 int r;
1871
1872 if (cap->flags)
1873 return -EINVAL;
1874
1875 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001876 case KVM_CAP_S390_CSS_SUPPORT:
1877 if (!vcpu->kvm->arch.css_support) {
1878 vcpu->kvm->arch.css_support = 1;
1879 trace_kvm_s390_enable_css(vcpu->kvm);
1880 }
1881 r = 0;
1882 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001883 default:
1884 r = -EINVAL;
1885 break;
1886 }
1887 return r;
1888}
1889
/*
 * Dispatch the s390-specific per-VCPU ioctls.  Returns a (possibly
 * negative) result code; -ENOTTY for ioctls not handled here.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* inject an interrupt described by the legacy ABI struct */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* store status touches guest memory -> hold srcu */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		/* both directions share the copy-in of the descriptor */
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* map a userspace range into the guest address space
		 * (user-controlled VMs only) */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* undo a KVM_S390_UCAS_MAP (user-controlled VMs only) */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address fault on behalf of userspace */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1994
Carsten Otte5b1c1492012-01-04 10:25:23 +01001995int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1996{
1997#ifdef CONFIG_KVM_S390_UCONTROL
1998 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1999 && (kvm_is_ucontrol(vcpu->kvm))) {
2000 vmf->page = virt_to_page(vcpu->arch.sie_block);
2001 get_page(vmf->page);
2002 return 0;
2003 }
2004#endif
2005 return VM_FAULT_SIGBUS;
2006}
2007
/* No arch-specific memslot metadata is needed on s390. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2013
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002014/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002015int kvm_arch_prepare_memory_region(struct kvm *kvm,
2016 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002017 struct kvm_userspace_memory_region *mem,
2018 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002019{
Nick Wangdd2887e2013-03-25 17:22:57 +01002020 /* A few sanity checks. We can have memory slots which have to be
2021 located/ended at a segment boundary (1MB). The memory in userland is
2022 ok to be fragmented into various different vmas. It is okay to mmap()
2023 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002024
Carsten Otte598841c2011-07-24 10:48:21 +02002025 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002026 return -EINVAL;
2027
Carsten Otte598841c2011-07-24 10:48:21 +02002028 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002029 return -EINVAL;
2030
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002031 return 0;
2032}
2033
/*
 * Finalize a memslot change by (re)mapping the slot into the guest
 * address space via gmap; failures are logged but cannot be undone
 * at this point.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
2058
/*
 * Module init: register with the generic KVM core and build the
 * facility list that is exposed to guests.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* start from the host's STFLE facility list... */
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	/*
	 * ...then mask it down to the facilities KVM supports.  The bit
	 * positions follow the STFLE facility numbering; presumably each
	 * set bit was vetted against the SIE implementation — confirm
	 * against the architecture documentation before changing.
	 */
	vfacilities[0] &= 0xff82fffbf4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
2081
/* Module exit: free the facility page and unregister from KVM core. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
2087
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
/* autoload on access to the misc device and on /dev/kvm lookup */
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");