blob: deac47378f777b2864ae951b8c172662707c251f [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010028#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010029#include <asm/lowcore.h>
30#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010031#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010032#include <asm/switch_to.h>
Michael Mueller78c4b59f2013-07-26 15:04:04 +020033#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020034#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010035#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010036#include "gaccess.h"
37
Cornelia Huck5786fff2012-07-23 17:20:29 +020038#define CREATE_TRACE_POINTS
39#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020040#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020041
/* Expand a vcpu stat name into its offset within struct kvm_vcpu::stat. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported through debugfs (kvm/ directory).
 * The table is consumed by common KVM code; it must be terminated
 * by the { NULL } sentinel entry.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }	/* sentinel - required by common KVM debugfs code */
};
102
/* Facility bit mask advertised to guests; filled in at module init. */
unsigned long *vfacilities;
/* IPTE notifier registered with the gmap code in kvm_arch_hardware_setup(). */
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
/*
 * Return non-zero when virtual facility bit @nr is set in the
 * vfacilities mask (i.e. the facility is offered to guests).
 */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
111
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100112/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200113int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100114{
115 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200116 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100117}
118
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * One-time arch setup: hook our IPTE notifier into the gmap layer so
 * that guest page table invalidations are forwarded to
 * kvm_gmap_notifier().  Must run before any VM is created.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
127
/* Undo kvm_arch_hardware_setup(): detach the IPTE notifier again. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
132
/*
 * Arch-specific module init.  Only registers the FLIC (floating
 * interrupt controller) device ops; returns its error code directly.
 */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
138
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100139/* Section: device related */
140long kvm_arch_dev_ioctl(struct file *filp,
141 unsigned int ioctl, unsigned long arg)
142{
143 if (ioctl == KVM_S390_ENABLE_SIE)
144 return s390_enable_sie();
145 return -EINVAL;
146}
147
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this s390
 * implementation supports.  Boolean capabilities return 1, sizing
 * capabilities return the limit, everything unknown returns 0.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* capabilities that are simply "supported" */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	/* sizing capabilities: report the compile-time limits */
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write only works when the machine has ESOP */
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
189
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400190static void kvm_s390_sync_dirty_log(struct kvm *kvm,
191 struct kvm_memory_slot *memslot)
192{
193 gfn_t cur_gfn, last_gfn;
194 unsigned long address;
195 struct gmap *gmap = kvm->arch.gmap;
196
197 down_read(&gmap->mm->mmap_sem);
198 /* Loop over all guest pages */
199 last_gfn = memslot->base_gfn + memslot->npages;
200 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
201 address = gfn_to_hva_memslot(memslot, cur_gfn);
202
203 if (gmap_test_and_clear_dirty(address, gmap))
204 mark_page_dirty(kvm, cur_gfn);
205 }
206 up_read(&gmap->mm->mmap_sem);
207}
208
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
/*
 * KVM_GET_DIRTY_LOG handler.  Synchronizes hardware dirty state from
 * the gmap into the slot bitmap, copies the bitmap to userspace via
 * kvm_get_dirty_log(), then clears it.  All under slots_lock so that
 * concurrent slot updates cannot race with the sync/clear sequence.
 *
 * Returns 0 on success, -EINVAL for a bad slot index, -ENOENT when
 * dirty logging is not enabled for the slot, or the error from
 * kvm_get_dirty_log().
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty bits from the gmap into the slot bitmap first */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
247
Cornelia Huckd938dc52013-10-23 18:26:34 +0200248static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
249{
250 int r;
251
252 if (cap->flags)
253 return -EINVAL;
254
255 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200256 case KVM_CAP_S390_IRQCHIP:
257 kvm->arch.use_irqchip = 1;
258 r = 0;
259 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200260 case KVM_CAP_S390_USER_SIGP:
261 kvm->arch.user_sigp = 1;
262 r = 0;
263 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200264 default:
265 r = -EINVAL;
266 break;
267 }
268 return r;
269}
270
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100271static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
272{
273 int ret;
274
275 switch (attr->attr) {
276 case KVM_S390_VM_MEM_LIMIT_SIZE:
277 ret = 0;
278 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
279 ret = -EFAULT;
280 break;
281 default:
282 ret = -ENXIO;
283 break;
284 }
285 return ret;
286}
287
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute:
 *  - ENABLE_CMMA: turn on collaborative memory management; only
 *    allowed before the first vcpu exists (-EBUSY otherwise).
 *  - CLR_CMMA: reset all CMMA state in the host mm.
 *  - LIMIT_SIZE: shrink the guest address space limit by replacing
 *    the gmap; also only allowed with no online vcpus.
 * Unknown attributes return -ENXIO.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		/* kvm->lock serializes against vcpu creation */
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		/* srcu read lock keeps the memslots stable while the
		 * host page tables are walked */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol guests manage their gmap themselves */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit may only be lowered, never raised */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the smaller gmap; safe because
				 * no vcpu can be running yet */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
346
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

/*
 * Write a KVM_S390_VM_CRYPTO attribute: enable or disable AES/DEA
 * protected-key wrapping.  Enabling generates a fresh random wrapping
 * key mask; disabling zeroes it.  Requires facility 76 (MSA extension
 * 3).  After changing the masks, every vcpu's crypto setup is redone
 * and the vcpu is kicked out of SIE so the change takes effect.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_vfacility(76))
		return -EINVAL;

	/* hold kvm->lock so the vcpu set cannot change underneath us */
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* propagate the new settings to all vcpus and force a SIE exit */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
393
Jason J. Herne72f25022014-11-25 09:46:02 -0500394static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
395{
396 u8 gtod_high;
397
398 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
399 sizeof(gtod_high)))
400 return -EFAULT;
401
402 if (gtod_high != 0)
403 return -EINVAL;
404
405 return 0;
406}
407
/*
 * Set the low word of the guest TOD clock.  The guest TOD is
 * implemented as an epoch offset relative to the host TOD, so the
 * offset is recomputed from the current host clock and pushed into
 * every vcpu's SIE block; exit_sie() kicks running vcpus so they pick
 * up the new epoch immediately.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	/* kvm->lock keeps the vcpu list and epoch update consistent */
	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
431
432static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
433{
434 int ret;
435
436 if (attr->flags)
437 return -EINVAL;
438
439 switch (attr->attr) {
440 case KVM_S390_VM_TOD_HIGH:
441 ret = kvm_s390_set_tod_high(kvm, attr);
442 break;
443 case KVM_S390_VM_TOD_LOW:
444 ret = kvm_s390_set_tod_low(kvm, attr);
445 break;
446 default:
447 ret = -ENXIO;
448 break;
449 }
450 return ret;
451}
452
453static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
454{
455 u8 gtod_high = 0;
456
457 if (copy_to_user((void __user *)attr->addr, &gtod_high,
458 sizeof(gtod_high)))
459 return -EFAULT;
460
461 return 0;
462}
463
464static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
465{
466 u64 host_tod, gtod;
467 int r;
468
469 r = store_tod_clock(&host_tod);
470 if (r)
471 return r;
472
473 gtod = host_tod + kvm->arch.epoch;
474 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
475 return -EFAULT;
476
477 return 0;
478}
479
480static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
481{
482 int ret;
483
484 if (attr->flags)
485 return -EINVAL;
486
487 switch (attr->attr) {
488 case KVM_S390_VM_TOD_HIGH:
489 ret = kvm_s390_get_tod_high(kvm, attr);
490 break;
491 case KVM_S390_VM_TOD_LOW:
492 ret = kvm_s390_get_tod_low(kvm, attr);
493 break;
494 default:
495 ret = -ENXIO;
496 break;
497 }
498 return ret;
499}
500
Dominik Dingelf2061652014-04-09 13:13:00 +0200501static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
502{
503 int ret;
504
505 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200506 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100507 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200508 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500509 case KVM_S390_VM_TOD:
510 ret = kvm_s390_set_tod(kvm, attr);
511 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200512 case KVM_S390_VM_CRYPTO:
513 ret = kvm_s390_vm_set_crypto(kvm, attr);
514 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200515 default:
516 ret = -ENXIO;
517 break;
518 }
519
520 return ret;
521}
522
523static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
524{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100525 int ret;
526
527 switch (attr->group) {
528 case KVM_S390_VM_MEM_CTRL:
529 ret = kvm_s390_get_mem_control(kvm, attr);
530 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500531 case KVM_S390_VM_TOD:
532 ret = kvm_s390_get_tod(kvm, attr);
533 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100534 default:
535 ret = -ENXIO;
536 break;
537 }
538
539 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200540}
541
542static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
543{
544 int ret;
545
546 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200547 case KVM_S390_VM_MEM_CTRL:
548 switch (attr->attr) {
549 case KVM_S390_VM_MEM_ENABLE_CMMA:
550 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100551 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200552 ret = 0;
553 break;
554 default:
555 ret = -ENXIO;
556 break;
557 }
558 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500559 case KVM_S390_VM_TOD:
560 switch (attr->attr) {
561 case KVM_S390_VM_TOD_LOW:
562 case KVM_S390_VM_TOD_HIGH:
563 ret = 0;
564 break;
565 default:
566 ret = -ENXIO;
567 break;
568 }
569 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200570 case KVM_S390_VM_CRYPTO:
571 switch (attr->attr) {
572 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
573 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
574 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
575 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
576 ret = 0;
577 break;
578 default:
579 ret = -ENXIO;
580 break;
581 }
582 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200583 default:
584 ret = -ENXIO;
585 break;
586 }
587
588 return ret;
589}
590
/*
 * Arch-specific VM ioctl dispatcher.  Copies the userspace argument
 * for each known ioctl and forwards to the matching helper.  Unknown
 * ioctls return -ENOTTY so common code can try its own handlers.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid after KVM_CAP_S390_IRQCHIP was enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
656
/*
 * Issue PQAP(QCI) - Query AP Configuration Information - and store the
 * result into the 128-byte buffer @config.  Returns the condition code
 * of the instruction (0 on success).
 *
 * The asm loads the function code into GR0 and the buffer address into
 * GR2 as required by PQAP, then extracts the condition code via ipm/srl.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;	/* QCI function code for PQAP */
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
675
676static int kvm_s390_apxa_installed(void)
677{
678 u8 config[128];
679 int cc;
680
681 if (test_facility(2) && test_facility(12)) {
682 cc = kvm_s390_query_ap_config(config);
683
684 if (cc)
685 pr_err("PQAP(QCI) failed with cc=%d", cc);
686 else
687 return config[0] & 0x40;
688 }
689
690 return 0;
691}
692
693static void kvm_s390_set_crycb_format(struct kvm *kvm)
694{
695 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
696
697 if (kvm_s390_apxa_installed())
698 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
699 else
700 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
701}
702
/*
 * Allocate and initialize the per-VM crypto control block (crycb).
 * A VM without facility 76 (MSA extension 3) gets no crycb and the
 * function succeeds as a no-op.  Returns -ENOMEM if the allocation
 * fails; the caller (kvm_arch_init_vm) unwinds in that case.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	/* GFP_DMA: the crycb must be addressable by the SIE hardware */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}
721
/*
 * Create the arch-specific part of a VM: validate the requested type,
 * enable SIE for the host mm, allocate the SCA (system control area)
 * and the debug feature, set up crypto and, for non-ucontrol guests,
 * the guest address space (gmap).  Errors unwind the partial state via
 * the goto chain at the bottom.
 *
 * @type: 0 for a normal guest; KVM_VM_S390_UCONTROL (CAP_SYS_ADMIN
 *        required) for a user-controlled guest that manages its own
 *        address spaces.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	/* shared across VMs: staggers SCA placement inside the page to
	 * spread cache line usage; protected by kvm_lock */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	/* offset stays within the page, so free_page() on the adjusted
	 * pointer still frees the right page */
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol guests: gmaps are created per vcpu instead */
		kvm->arch.gmap = NULL;
	} else {
		/* 44-bit guest address space limit (2^44 - 1) */
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
796
/*
 * Tear down a vcpu: clear pending local interrupts and async page
 * faults, detach the vcpu from the SCA (for non-ucontrol guests),
 * free the per-vcpu gmap (ucontrol only), undo CMMA setup when CMMA
 * is enabled, and release the SIE control block and the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this cpu from the SCA cpu mask and sda entry */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before the block is freed */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
822
/*
 * Destroy all vcpus of @kvm and reset the online vcpu bookkeeping.
 * Called from kvm_arch_destroy_vm() when no vcpu can run anymore.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	/* clear the vcpu array under kvm->lock before dropping the count */
	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
838
/*
 * Free all per-VM state: vcpus first (they reference the SCA and gmap),
 * then the SCA page, debug facility, crypto control block, address space
 * (regular guests only), irqchip adapters and any remaining floating irqs.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	/* ucontrol guests have per-vcpu gmaps, freed in vcpu_destroy */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
850
851/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +0100852static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
853{
854 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
855 if (!vcpu->arch.gmap)
856 return -ENOMEM;
857 vcpu->arch.gmap->private = vcpu->kvm;
858
859 return 0;
860}
861
/*
 * Early per-vcpu init, called from the generic kvm_vcpu_init():
 * disable async pfault handling and declare which register sets are
 * mirrored into the kvm_run area for userspace.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	/* ucontrol vcpus additionally need their own gmap */
	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
878
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host's
 * FP/access register state, install the guest's, attach the guest address
 * space and mark the vcpu as running. Order matters - host state must be
 * saved before guest state is restored into the same facilities.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	/* acrs live in the sync area shared with userspace */
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
890
/*
 * Mirror of kvm_arch_vcpu_load(), called when the vcpu is scheduled out:
 * clear the running flag, detach the guest address space, save the guest
 * FP/access registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
902
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* reset the hardware FP control as well, not just the saved copy */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if userspace does not manage run state itself */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
925
/*
 * Late per-vcpu setup, after the vcpu is visible to the rest of KVM:
 * inherit the VM-wide TOD epoch (under the kvm lock to keep all vcpus
 * consistent) and, for regular guests, share the per-VM address space.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	/* ucontrol vcpus got a private gmap in __kvm_ucontrol_vcpu_init */
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
934
/*
 * Program the vcpu's crypto controls from the VM-wide settings:
 * AES/DEA key wrapping bits in ecb3 and the crypto control block origin.
 */
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/* nothing to set up without the MSA extension (facility 76) */
	if (!test_vfacility(76))
		return;

	/* clear both wrapping bits, then set whatever the VM enabled */
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
949
Dominik Dingelb31605c2014-03-25 13:47:11 +0100950void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
951{
952 free_page(vcpu->arch.sie_block->cbrlo);
953 vcpu->arch.sie_block->cbrlo = 0;
954}
955
956int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
957{
958 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
959 if (!vcpu->arch.sie_block->cbrlo)
960 return -ENOMEM;
961
962 vcpu->arch.sie_block->ecb2 |= 0x80;
963 vcpu->arch.sie_block->ecb2 &= ~0x08;
964 return 0;
965}
966
/*
 * Program the SIE control block of a freshly created vcpu: initial cpu
 * flags, execution-control bits (gated on installed facilities), the
 * interception controls, optional CMMA state, the wakeup timer and the
 * crypto controls.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* vcpu starts in z/Arch mode, stopped, with GED enabled */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* 0x10 needs both facility 50 and 73 - TX related, presumably;
	 * exact bit semantics are defined by the SIE architecture */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* intercept the storage-key and TPROT instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* clock-comparator wakeup timer */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1003
1004struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1005 unsigned int id)
1006{
Carsten Otte4d475552011-10-18 12:27:12 +02001007 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001008 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001009 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001010
Carsten Otte4d475552011-10-18 12:27:12 +02001011 if (id >= KVM_MAX_VCPUS)
1012 goto out;
1013
1014 rc = -ENOMEM;
1015
Michael Muellerb110fea2013-06-12 13:54:54 +02001016 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001017 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001018 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001019
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001020 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1021 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001022 goto out_free_cpu;
1023
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001024 vcpu->arch.sie_block = &sie_page->sie_block;
1025 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1026
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001027 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001028 if (!kvm_is_ucontrol(kvm)) {
1029 if (!kvm->arch.sca) {
1030 WARN_ON_ONCE(1);
1031 goto out_free_cpu;
1032 }
1033 if (!kvm->arch.sca->cpu[id].sda)
1034 kvm->arch.sca->cpu[id].sda =
1035 (__u64) vcpu->arch.sie_block;
1036 vcpu->arch.sie_block->scaoh =
1037 (__u32)(((__u64)kvm->arch.sca) >> 32);
1038 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1039 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1040 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001041
Carsten Otteba5c1e92008-03-25 18:47:26 +01001042 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001043 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001044 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001045 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001046
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001047 rc = kvm_vcpu_init(vcpu, kvm, id);
1048 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001049 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001050 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1051 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001052 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001053
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001054 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001055out_free_sie_block:
1056 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001057out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001058 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001059out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001060 return ERR_PTR(rc);
1061}
1062
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1067
/* Prevent the vcpu from (re-)entering SIE by setting the block bit. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1072
/* Allow the vcpu to enter SIE again; counterpart of s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1077
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the hardware has left SIE for this vcpu */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1088
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1095
/*
 * gmap invalidation notifier: if the unmapped address covers a vcpu's
 * prefix area, force that vcpu to re-arm the ipte notifier via a
 * MMU_RELOAD request and kick it out of SIE.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages (mask off the 2nd page bit) */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
1111
/* s390 uses exit_sie()/requests instead of the generic kick mechanism. */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1118
/*
 * KVM_GET_ONE_REG: copy a single s390 register out to userspace.
 * TODPR is 32 bit, all other registers here are 64 bit. Returns
 * -EINVAL for unknown register ids, otherwise the put_user() result.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1167
/*
 * KVM_SET_ONE_REG: copy a single s390 register in from userspace.
 * Mirror of the get variant; additionally, setting an invalid pfault
 * token cancels all outstanding async page faults.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* disabling pfault also drops any queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001218
/* KVM_S390_INITIAL_RESET ioctl: perform an initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1224
/* Copy the general purpose registers from userspace into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1230
/* Copy the general purpose registers from the sync area to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1236
/*
 * Set access and control registers. The access registers are also loaded
 * into the hardware immediately, since the vcpu may be loaded right now.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1245
/* Copy access and control registers out to userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1253
/*
 * Set the guest floating point state. The FPC is validated first so a
 * bad value from userspace never reaches the hardware; the registers
 * are then loaded immediately for the currently loaded vcpu.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1264
/* Copy the guest floating point state out to userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1271
1272static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1273{
1274 int rc = 0;
1275
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001276 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001277 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001278 else {
1279 vcpu->run->psw_mask = psw.mask;
1280 vcpu->run->psw_addr = psw.addr;
1281 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001282 return rc;
1283}
1284
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1290
/* guest-debug flags userspace may request on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * Enable or disable guest debugging. On any failure (bad flags or
 * breakpoint import error) the vcpu is left with debugging fully
 * disabled, never in a half-configured state.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean state: no debug flags, no breakpoint data */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* breakpoint import failed - roll back to disabled */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1326
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001327int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1328 struct kvm_mp_state *mp_state)
1329{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001330 /* CHECK_STOP and LOAD are not supported yet */
1331 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1332 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001333}
1334
/*
 * Set the vcpu run state from userspace. Using this ioctl at all hands
 * run-state control over to userspace (user_cpu_state_ctrl). Only
 * STOPPED and OPERATING are supported; everything else is -ENXIO.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1359
Dominik Dingelb31605c2014-03-25 13:47:11 +01001360bool kvm_s390_cmma_enabled(struct kvm *kvm)
1361{
1362 if (!MACHINE_IS_LPAR)
1363 return false;
1364 /* only enable for z10 and later */
1365 if (!MACHINE_HAS_EDAT1)
1366 return false;
1367 if (!kvm->arch.use_cmma)
1368 return false;
1369 return true;
1370}
1371
/* Is the interruption-buffering-state (IBS) flag set for this vcpu? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1376
/*
 * Process pending vcpu requests before (re-)entering SIE. Every handled
 * request restarts the loop so that a request raised while processing
 * another is not missed.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the SIE-internal TLB tag */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1426
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	/* FAULT_FLAG_WRITE requests a writable mapping */
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1442
/*
 * Inject an async-pfault notification: the INIT token goes to the vcpu
 * as a local interrupt, the DONE token is queued as a floating (VM-wide)
 * interrupt. Injection failures are unexpected, hence the WARNs.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1459
/* async-pf callback: tell the guest that its page is not yet present. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1466
/* async-pf callback: tell the guest that the faulted page is now present. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1473
/* async-pf callback: nothing to do here on s390 (see comment below). */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1479
/* async-pf callback: always report "can inject" - see comment body. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1488
/*
 * Decide whether the current host fault can be handled asynchronously
 * via the pfault mechanism and, if so, queue the async work. Returns 0
 * when the fault must be resolved synchronously instead.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* guest never enabled pfault handling */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the guest PSW does not match the mask/compare set at pfault init */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* CR0 bit 0x200: external interrupt subclass for pfault - presumably;
	 * defined by the pfault architecture */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* read the 8 byte token the guest registered at pfault init */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1517
/*
 * Everything that has to happen on the host side before (re-)entering
 * SIE: async-pf housekeeping, rescheduling, machine-check handling,
 * interrupt delivery, request processing and guest-debug preparation.
 * Returns 0 when SIE may be entered, non-zero to bail out to userspace.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* pass r14/r15 into the SIE control block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests deliver their interrupts from userspace */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1559
/*
 * Post-process a SIE exit. rc stays at the -1 sentinel unless one of the
 * known exit causes (clean intercept, ucontrol exit, guest page fault)
 * claims it; a leftover -1 therefore means an unexpected SIE fault and
 * is turned into an addressing exception for the guest.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let userspace resolve the fault for ucontrol guests */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		/* try async pfault first, fall back to synchronous fault-in */
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* pass r14/r15 back from the SIE control block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1609
/*
 * Main VCPU execution loop: repeatedly prepare the VCPU, enter the guest
 * via SIE and post-process the exit, until an error occurs, a signal is
 * pending, or a guest-debug exit is requested.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* Drop srcu while the guest runs; re-acquired below. */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1644
David Hildenbrandb028ee32014-07-17 10:47:43 +02001645static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1646{
1647 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1648 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1649 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1650 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1651 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1652 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001653 /* some control register changes require a tlb flush */
1654 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001655 }
1656 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1657 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1658 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1659 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1660 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1661 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1662 }
1663 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1664 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1665 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1666 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001667 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1668 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02001669 }
1670 kvm_run->kvm_dirty_regs = 0;
1671}
1672
1673static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1674{
1675 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1676 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1677 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1678 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1679 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1680 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1681 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1682 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1683 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1684 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1685 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1686 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1687}
1688
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001689int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1690{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001691 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001692 sigset_t sigsaved;
1693
David Hildenbrand27291e22014-01-23 12:26:52 +01001694 if (guestdbg_exit_pending(vcpu)) {
1695 kvm_s390_prepare_debug_exit(vcpu);
1696 return 0;
1697 }
1698
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001699 if (vcpu->sigset_active)
1700 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1701
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001702 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1703 kvm_s390_vcpu_start(vcpu);
1704 } else if (is_vcpu_stopped(vcpu)) {
1705 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1706 vcpu->vcpu_id);
1707 return -EINVAL;
1708 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001709
David Hildenbrandb028ee32014-07-17 10:47:43 +02001710 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001711
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001712 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001713 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001714
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001715 if (signal_pending(current) && !rc) {
1716 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001717 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001718 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001719
David Hildenbrand27291e22014-01-23 12:26:52 +01001720 if (guestdbg_exit_pending(vcpu) && !rc) {
1721 kvm_s390_prepare_debug_exit(vcpu);
1722 rc = 0;
1723 }
1724
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001725 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001726 /* intercept cannot be handled in-kernel, prepare kvm-run */
1727 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1728 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001729 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1730 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1731 rc = 0;
1732 }
1733
1734 if (rc == -EREMOTE) {
1735 /* intercept was handled, but userspace support is needed
1736 * kvm_run has been prepared by the handler */
1737 rc = 0;
1738 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001739
David Hildenbrandb028ee32014-07-17 10:47:43 +02001740 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001741
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001742 if (vcpu->sigset_active)
1743 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1744
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001745 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001746 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001747}
1748
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;	/* mode flag written to abs. byte 163 */
	unsigned int px;
	u64 clkcomp;
	int rc;

	/* Resolve the two magic addresses; both also store the mode byte. */
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/*
	 * Write every field of the save area; individual error codes are
	 * OR-ed together and collapsed into a single -EFAULT at the end.
	 */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* The clock comparator is stored shifted right by one byte. */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1796
/* Store the full CPU status of @vcpu at guest address @addr. */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1810
/*
 * Request that IBS gets disabled on @vcpu. Any still-pending ENABLE
 * request is consumed first so the two cannot race; exit_sie_sync()
 * then makes sure the VCPU leaves SIE and notices the new request.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1817
1818static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1819{
1820 unsigned int i;
1821 struct kvm_vcpu *vcpu;
1822
1823 kvm_for_each_vcpu(i, vcpu, kvm) {
1824 __disable_ibs_on_vcpu(vcpu);
1825 }
1826}
1827
/*
 * Request that IBS gets enabled on @vcpu. Any still-pending DISABLE
 * request is consumed first; exit_sie_sync() makes sure the VCPU
 * leaves SIE and notices the new request.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1834
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001835void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1836{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001837 int i, online_vcpus, started_vcpus = 0;
1838
1839 if (!is_vcpu_stopped(vcpu))
1840 return;
1841
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001842 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001843 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001844 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001845 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1846
1847 for (i = 0; i < online_vcpus; i++) {
1848 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1849 started_vcpus++;
1850 }
1851
1852 if (started_vcpus == 0) {
1853 /* we're the only active VCPU -> speed it up */
1854 __enable_ibs_on_vcpu(vcpu);
1855 } else if (started_vcpus == 1) {
1856 /*
1857 * As we are starting a second VCPU, we have to disable
1858 * the IBS facility on all VCPUs to remove potentially
1859 * oustanding ENABLE requests.
1860 */
1861 __disable_ibs_on_all_vcpus(vcpu->kvm);
1862 }
1863
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001864 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001865 /*
1866 * Another VCPU might have used IBS while we were offline.
1867 * Let's play safe and flush the VCPU at startup.
1868 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001869 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001870 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001871 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001872}
1873
1874void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1875{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001876 int i, online_vcpus, started_vcpus = 0;
1877 struct kvm_vcpu *started_vcpu = NULL;
1878
1879 if (is_vcpu_stopped(vcpu))
1880 return;
1881
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001882 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001883 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001884 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001885 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1886
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001887 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02001888 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001889
David Hildenbrand6cddd432014-10-15 16:48:53 +02001890 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001891 __disable_ibs_on_vcpu(vcpu);
1892
1893 for (i = 0; i < online_vcpus; i++) {
1894 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1895 started_vcpus++;
1896 started_vcpu = vcpu->kvm->vcpus[i];
1897 }
1898 }
1899
1900 if (started_vcpus == 1) {
1901 /*
1902 * As we only have one VCPU left, we want to enable the
1903 * IBS facility for that VCPU to speed it up.
1904 */
1905 __enable_ibs_on_vcpu(started_vcpu);
1906 }
1907
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001908 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001909 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001910}
1911
Cornelia Huckd6712df2012-12-20 15:32:11 +01001912static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1913 struct kvm_enable_cap *cap)
1914{
1915 int r;
1916
1917 if (cap->flags)
1918 return -EINVAL;
1919
1920 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001921 case KVM_CAP_S390_CSS_SUPPORT:
1922 if (!vcpu->kvm->arch.css_support) {
1923 vcpu->kvm->arch.css_support = 1;
1924 trace_kvm_s390_enable_css(vcpu->kvm);
1925 }
1926 r = 0;
1927 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001928 default:
1929 r = -EINVAL;
1930 break;
1931 }
1932 return r;
1933}
1934
/*
 * Dispatcher for the s390-specific VCPU ioctls. Generic ioctls are
 * handled by the common KVM code before this is reached.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* Inject an interrupt into this VCPU. */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		/* Translate the legacy layout into a kvm_s390_irq. */
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* Guest memory access below needs srcu held. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	/* The UCAS ioctls manage the gmap of user-controlled VMs only. */
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Resolve a fault for guest address @arg in the gmap. */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2039
Carsten Otte5b1c1492012-01-04 10:25:23 +01002040int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2041{
2042#ifdef CONFIG_KVM_S390_UCONTROL
2043 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2044 && (kvm_is_ucontrol(vcpu->kvm))) {
2045 vmf->page = virt_to_page(vcpu->arch.sie_block);
2046 get_page(vmf->page);
2047 return 0;
2048 }
2049#endif
2050 return VM_FAULT_SIGBUS;
2051}
2052
/* No arch-specific per-memslot data is needed on s390. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2058
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002059/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002060int kvm_arch_prepare_memory_region(struct kvm *kvm,
2061 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002062 struct kvm_userspace_memory_region *mem,
2063 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002064{
Nick Wangdd2887e2013-03-25 17:22:57 +01002065 /* A few sanity checks. We can have memory slots which have to be
2066 located/ended at a segment boundary (1MB). The memory in userland is
2067 ok to be fragmented into various different vmas. It is okay to mmap()
2068 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002069
Carsten Otte598841c2011-07-24 10:48:21 +02002070 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002071 return -EINVAL;
2072
Carsten Otte598841c2011-07-24 10:48:21 +02002073 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002074 return -EINVAL;
2075
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002076 return 0;
2077}
2078
/*
 * Called after a memslot change has been committed in the generic code;
 * re-establishes the gmap mapping for the new slot layout.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	/* Map the user memory into the guest address space (gmap). */
	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
2103
/*
 * Module init: register with the generic KVM layer and build the
 * facility list that is exposed to guests.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		/* Undo kvm_init() before failing. */
		kvm_exit();
		return -ENOMEM;
	}
	/* Start from the host facility list, then mask to the known set. */
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
2126
/* Module exit: release the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
2132
2133module_init(kvm_s390_init);
2134module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002135
2136/*
2137 * Enable autoloading of the kvm module.
2138 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2139 * since x86 takes a different approach.
2140 */
2141#include <linux/miscdevice.h>
2142MODULE_ALIAS_MISCDEV(KVM_MINOR);
2143MODULE_ALIAS("devname:kvm");