blob: 56a411c0245a7db1be05034e8e1d82e127aff5e2 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
/* Map a vcpu stat name to its offset inside struct kvm_vcpu for debugfs. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported under debugfs (see virt/kvm); the table is
 * terminated by a NULL-named sentinel entry.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
92
/* Bitmap of virtualization-relevant facility bits, set up at module init. */
unsigned long *vfacilities;
/* Notifier used to get callbacks on guest prefix-page invalidation. */
static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010095
/*
 * test_vfacility - test availability of a facility in the vfacilities bitmap
 * @nr: facility bit number to test
 *
 * Returns non-zero if facility @nr is available to guests.
 */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
101
/* Section: not file related */
/* Per-CPU hardware enable hook; nothing to do on s390. */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
108
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/* One-time hardware setup: hook into gmap ipte invalidation notifications. */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
117
/* Undo kvm_arch_hardware_setup(): drop the ipte notifier registration. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
122
/* Arch-specific module init hook; @opaque is unused on s390. */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
128
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100129/* Section: device related */
130long kvm_arch_dev_ioctl(struct file *filp,
131 unsigned int ioctl, unsigned long arg)
132{
133 if (ioctl == KVM_S390_ENABLE_SIE)
134 return s390_enable_sie();
135 return -EINVAL;
136}
137
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this arch supports.
 * Returns 1 (or a count/limit) for supported extensions, 0 otherwise.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* Capabilities that are unconditionally available: */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	/* Capabilities whose answer is a numeric limit: */
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	/* COW support depends on the machine's ESOP facility: */
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
178
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400179static void kvm_s390_sync_dirty_log(struct kvm *kvm,
180 struct kvm_memory_slot *memslot)
181{
182 gfn_t cur_gfn, last_gfn;
183 unsigned long address;
184 struct gmap *gmap = kvm->arch.gmap;
185
186 down_read(&gmap->mm->mmap_sem);
187 /* Loop over all guest pages */
188 last_gfn = memslot->base_gfn + memslot->npages;
189 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
190 address = gfn_to_hva_memslot(memslot, cur_gfn);
191
192 if (gmap_test_and_clear_dirty(address, gmap))
193 mark_page_dirty(kvm, cur_gfn);
194 }
195 up_read(&gmap->mm->mmap_sem);
196}
197
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100198/* Section: vm related */
199/*
200 * Get (and clear) the dirty memory log for a memory slot.
201 */
/*
 * KVM_GET_DIRTY_LOG handler: sync dirty state from the gmap, copy the
 * slot's dirty bitmap to userspace and clear it.
 *
 * Returns 0 on success, -EINVAL for a bad slot index, -ENOENT if dirty
 * logging is not enabled for the slot, or the error from
 * kvm_get_dirty_log().
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	/* Serializes against memslot changes and concurrent log readers. */
	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* Pull gmap-side dirty bits into the generic dirty bitmap first. */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
236
Cornelia Huckd938dc52013-10-23 18:26:34 +0200237static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
238{
239 int r;
240
241 if (cap->flags)
242 return -EINVAL;
243
244 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200245 case KVM_CAP_S390_IRQCHIP:
246 kvm->arch.use_irqchip = 1;
247 r = 0;
248 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200249 default:
250 r = -EINVAL;
251 break;
252 }
253 return r;
254}
255
/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group:
 *  - ENABLE_CMMA: turn on collaborative memory management; only allowed
 *    while no vcpus exist yet (checked under kvm->lock), else -EBUSY.
 *  - CLR_CMMA: reset the CMMA page states (pgste) of the whole guest
 *    address space.
 * Unknown attributes yield -ENXIO.
 */
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* CMMA can only be enabled before the first vcpu is created. */
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot updates. */
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
284
Dominik Dingelf2061652014-04-09 13:13:00 +0200285static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
286{
287 int ret;
288
289 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200290 case KVM_S390_VM_MEM_CTRL:
291 ret = kvm_s390_mem_control(kvm, attr);
292 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200293 default:
294 ret = -ENXIO;
295 break;
296 }
297
298 return ret;
299}
300
/* KVM_GET_DEVICE_ATTR (VM scope): no readable attributes exist yet. */
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}
305
306static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
307{
308 int ret;
309
310 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200311 case KVM_S390_VM_MEM_CTRL:
312 switch (attr->attr) {
313 case KVM_S390_VM_MEM_ENABLE_CMMA:
314 case KVM_S390_VM_MEM_CLR_CMMA:
315 ret = 0;
316 break;
317 default:
318 ret = -ENXIO;
319 break;
320 }
321 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200322 default:
323 ret = -ENXIO;
324 break;
325 }
326
327 return ret;
328}
329
/*
 * Arch-specific VM ioctl dispatcher.  Copies the argument structure in
 * from userspace where needed and forwards to the matching handler;
 * unknown ioctls return -ENOTTY per convention.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* Only valid after KVM_CAP_S390_IRQCHIP was enabled. */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
395
/*
 * Allocate and describe the crypto control block (CRYCB) for a VM.
 *
 * Returns 0 when the crypto facility (76) is absent (nothing to set up)
 * or on success; -ENOMEM if the CRYCB cannot be allocated.  The CRYCB is
 * freed in kvm_arch_destroy_vm() / the init_vm error path.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	/* GFP_DMA: the SIE hardware needs the block below 2GB. */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	/* crycbd combines the block's address with its format designation. */
	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}
411
/*
 * Arch-side VM creation: validate the requested VM type, allocate the
 * system control area (SCA), debug feature, crypto block and (for
 * non-ucontrol VMs) the guest address space (gmap).
 *
 * On failure, the goto chain unwinds exactly what was already set up.
 * Returns 0 on success, -EINVAL for a bad type, -ENOMEM or the
 * s390_enable_sie() error otherwise.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	/* Shared across VMs so consecutive SCAs land on different cache lines. */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	/* user-controlled VMs manage guest memory themselves: privileged. */
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/* Stagger the SCA inside its page by a rotating 16-byte offset. */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs get per-vcpu gmaps in kvm_arch_vcpu_init(). */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
484
/*
 * Tear down a single vcpu: clear pending irqs and async-pf work, unhook
 * it from the SCA (non-ucontrol), release its gmap (ucontrol), free the
 * CMMA buffer and the SIE control block, then the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* Remove this cpu from the SCA's configured-cpu mask... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ...and drop its SIE block pointer if it is still ours. */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
510
/* Destroy all vcpus of a VM and reset the online-vcpu bookkeeping. */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
526
/*
 * Arch-side VM teardown; releases everything kvm_arch_init_vm() and
 * vcpu creation set up (vcpus first, then SCA, debug feature, crypto
 * block, gmap, irq adapters and floating interrupts).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
538
539/* Section: vcpu related */
/*
 * Early vcpu init (called from kvm_vcpu_init()): set up async-pf state
 * and the vcpu's gmap.  ucontrol vcpus get a private gmap; regular vcpus
 * share the VM-wide one and advertise the register sets synchronized via
 * the kvm_run area.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}
561
/*
 * Called when the vcpu is scheduled onto a host cpu: save host FP and
 * access registers, install the guest's, attach the gmap and mark the
 * vcpu RUNNING in its SIE control block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
573
/*
 * Counterpart of kvm_arch_vcpu_load(): clear RUNNING, detach the gmap,
 * stash the guest's FP/access registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
585
/*
 * Apply the architecture's initial-cpu-reset state to the vcpu's SIE
 * block and pending-work queues.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* Architected reset values for control registers 0 and 14. */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	/* Drop any outstanding async page-fault work. */
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* Unless userspace controls cpu states itself, a reset cpu stops. */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
608
/* Post-creation hook; nothing arch-specific to do on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
613
Tony Krowiak5102ee82014-06-27 14:46:01 -0400614static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
615{
616 if (!test_vfacility(76))
617 return;
618
619 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
620}
621
/* Free the vcpu's CMMA block-usage list (CBRL) page and clear the origin. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
627
/*
 * Allocate the CMMA block-usage list page and enable CMMA handling in
 * the SIE block's ecb2 flags.  Returns 0 or -ENOMEM.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* Set the CMMA enable bit, clear the bit that would disable it. */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
638
/*
 * Final vcpu setup before first run: program the SIE control block's
 * mode/feature flags, interception controls and facility list, set up
 * CMMA (if enabled for the VM), the clock-comparator timer and the
 * crypto designation.  Returns 0 or the CMMA setup error.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* Enable the additional ecb feature only with facilities 50 and 73. */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* Intercept the key/reference and TPROT instructions. */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
673
674struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
675 unsigned int id)
676{
Carsten Otte4d475552011-10-18 12:27:12 +0200677 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200678 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200679 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100680
Carsten Otte4d475552011-10-18 12:27:12 +0200681 if (id >= KVM_MAX_VCPUS)
682 goto out;
683
684 rc = -ENOMEM;
685
Michael Muellerb110fea2013-06-12 13:54:54 +0200686 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100687 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200688 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100689
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200690 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
691 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100692 goto out_free_cpu;
693
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200694 vcpu->arch.sie_block = &sie_page->sie_block;
695 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
696
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100697 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100698 if (!kvm_is_ucontrol(kvm)) {
699 if (!kvm->arch.sca) {
700 WARN_ON_ONCE(1);
701 goto out_free_cpu;
702 }
703 if (!kvm->arch.sca->cpu[id].sda)
704 kvm->arch.sca->cpu[id].sda =
705 (__u64) vcpu->arch.sie_block;
706 vcpu->arch.sie_block->scaoh =
707 (__u32)(((__u64)kvm->arch.sca) >> 32);
708 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
709 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
710 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100711
Carsten Otteba5c1e92008-03-25 18:47:26 +0100712 spin_lock_init(&vcpu->arch.local_int.lock);
713 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
714 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200715 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100716 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100717
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100718 rc = kvm_vcpu_init(vcpu, kvm, id);
719 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800720 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100721 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
722 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200723 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100724
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100725 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800726out_free_sie_block:
727 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100728out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200729 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200730out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100731 return ERR_PTR(rc);
732}
733
/*
 * Tell common KVM code whether this vcpu is runnable: on s390 a vcpu is
 * runnable exactly when it has a deliverable interrupt pending.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}
738
/*
 * Prevent (re-)entry into SIE for this vcpu by setting the
 * PROG_BLOCK_SIE bit in the SIE control block's prog20 field.
 */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
743
/*
 * Re-allow SIE entry for this vcpu by clearing the PROG_BLOCK_SIE bit
 * set by s390_vcpu_block().
 */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
748
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* request a stop intercept so the hardware leaves guest context */
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the in-SIE marker in prog0c is cleared */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
759
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* order matters: block re-entry first, then force the exit, so the
	 * vcpu cannot slip back into SIE between the two steps */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
766
/*
 * gmap invalidation notifier: called when a host mapping backing guest
 * memory at @address is going away.  If the page belongs to a vcpu's
 * prefix area, force that vcpu out of SIE and have it re-arm/reload the
 * prefix mapping via KVM_REQ_MMU_RELOAD.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages: masking off the 0x1000
		 * bit maps either page of the 2-page prefix area onto the
		 * area's start address */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
782
/*
 * Arch hook required by common KVM code; s390 never reaches it because
 * vcpu kicking is done via SIE intercept requests instead.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
789
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register (identified by
 * reg->id) to the user buffer at reg->addr.  Returns -EINVAL for
 * unknown register ids, otherwise the put_user() result.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
838
/*
 * KVM_SET_ONE_REG: read a single s390-specific register value from the
 * user buffer at reg->addr and store it in the vcpu state.  Mirrors
 * kvm_arch_vcpu_ioctl_get_one_reg(); returns -EINVAL for unknown ids.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500887
/* KVM_S390_INITIAL_RESET ioctl: delegate to the common reset helper. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
893
/* KVM_SET_REGS: copy all 16 general purpose registers from userspace
 * into the synced register area of the run structure. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
899
/* KVM_GET_REGS: copy all 16 general purpose registers out of the synced
 * register area of the run structure. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
905
/*
 * KVM_SET_SREGS: install new access and control registers.  The access
 * registers are reloaded into the CPU immediately because they are kept
 * live while the vcpu is loaded (lazy save/restore scheme).
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
914
/* KVM_GET_SREGS: copy out the guest access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
922
/*
 * KVM_SET_FPU: install new guest floating point registers and fp
 * control.  Rejects invalid fpc bits up front; the new values are
 * loaded into the hardware registers right away (lazy fpu scheme).
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
933
/* KVM_GET_FPU: copy out the guest floating point registers and fpc. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
940
941static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
942{
943 int rc = 0;
944
David Hildenbrand7a42fdc2014-05-05 16:26:19 +0200945 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100946 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100947 else {
948 vcpu->run->psw_mask = psw.mask;
949 vcpu->run->psw_addr = psw.addr;
950 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100951 return rc;
952}
953
/* KVM_TRANSLATE ioctl: address translation is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
959
David Hildenbrand27291e22014-01-23 12:26:52 +0100960#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
961 KVM_GUESTDBG_USE_HW_BP | \
962 KVM_GUESTDBG_ENABLE)
963
/*
 * KVM_SET_GUEST_DEBUG: enable or disable guest debugging.  Enabling
 * forces PER (program event recording) in the guest via CPUSTAT_P and
 * optionally imports hardware breakpoints.  On any failure all debug
 * state is rolled back.  Returns -EINVAL for unsupported control flags.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate before applying the new settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoints failed - undo everything */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
995
/* KVM_GET_MP_STATE: report the vcpu as either STOPPED or OPERATING. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
1003
/*
 * KVM_SET_MP_STATE: start or stop a vcpu on behalf of userspace.  The
 * first use of this ioctl flips the VM into user-controlled cpu state
 * mode.  Returns -ENXIO for states not supported on s390.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1028
Dominik Dingelb31605c2014-03-25 13:47:11 +01001029bool kvm_s390_cmma_enabled(struct kvm *kvm)
1030{
1031 if (!MACHINE_IS_LPAR)
1032 return false;
1033 /* only enable for z10 and later */
1034 if (!MACHINE_HAS_EDAT1)
1035 return false;
1036 if (!kvm->arch.use_cmma)
1037 return false;
1038 return true;
1039}
1040
/* Return whether the IBS (interlock-and-broadcast-suppression) flag is
 * currently set in the vcpu's cpuflags. */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1045
/*
 * Process pending vcpu requests before (re-)entering SIE.  Each handled
 * request restarts the loop so that requests raised concurrently (e.g.
 * by the gmap notifier) are picked up before the guest runs again.
 * Returns 0 on success or a negative error from gmap_ipte_notify().
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	/* undo a possible s390_vcpu_block() from exit_sie_sync() */
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the cached host cpu number to force a TLB flush */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1095
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	/* delegate to the gmap layer; FAULT_FLAG_WRITE requests a
	 * writable host mapping */
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1111
Dominik Dingel3c038e62013-10-07 17:11:48 +02001112static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1113 unsigned long token)
1114{
1115 struct kvm_s390_interrupt inti;
1116 inti.parm64 = token;
1117
1118 if (start_token) {
1119 inti.type = KVM_S390_INT_PFAULT_INIT;
1120 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
1121 } else {
1122 inti.type = KVM_S390_INT_PFAULT_DONE;
1123 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1124 }
1125}
1126
/* async_pf callback: the page is not present yet - tell the guest to
 * reschedule by injecting a pfault init interrupt. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1133
/* async_pf callback: the page is now available - inject the matching
 * pfault done interrupt so the guest can resume the waiting task. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1140
/* async_pf callback: nothing to do here on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1146
/* async_pf hook: always report "can inject" so the common code still
 * runs its completion cleanup. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1155
/*
 * Try to set up an asynchronous page fault for the address that just
 * faulted in SIE (current->thread.gmap_addr).  Returns 0 when pfault
 * handling is not applicable (disabled, masked off, or guest state
 * forbids it) so the caller falls back to a synchronous fault-in;
 * otherwise returns the kvm_setup_async_pf() result.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault must be enabled by the guest (valid token set) */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the current PSW must match the guest's pfault compare/select mask */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	/* service signal subclass mask must be enabled in CR0 */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1184
/*
 * Prepare a vcpu for (re-)entering SIE: handle completed async page
 * faults, pending machine checks, interrupt delivery, vcpu requests and
 * guest debug patching.  Returns 0 if SIE may be entered, otherwise a
 * negative error to abort the run loop.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 shadow the guest's r14/r15 for the SIE entry path */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1226
/*
 * Post-process a SIE exit.  @exit_reason >= 0 means a regular exit that
 * may carry an intercept to handle; negative values indicate a host
 * fault during SIE (ucontrol translation exception or a guest page
 * fault to resolve, possibly via async pfault).  Returns 0 to continue
 * the run loop, a positive/negative value to leave it.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	/* -1 acts as "unresolved fault" sentinel until classified below */
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let userspace resolve the translation exception */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			/* async pfault not possible - fault in synchronously */
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		/* unexplainable SIE fault - report it to the guest */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* copy the gg14/gg15 shadows back into the guest's r14/r15 */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1276
/*
 * Main vcpu run loop: repeatedly prepare, enter SIE and post-process
 * exits until a signal arrives, a guest-debug exit is pending, or a
 * handler returns non-zero.  The srcu lock is held everywhere except
 * across the actual SIE execution.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1311
/*
 * Copy the register state userspace marked dirty in kvm_run into the
 * vcpu before entering the guest (counterpart of store_regs()).
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	/* everything consumed - reset the dirty mask */
	kvm_run->kvm_dirty_regs = 0;
}
1337
/*
 * Copy the current vcpu register state back into kvm_run so userspace
 * sees it after KVM_RUN returns (counterpart of sync_regs()).
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
1353
/*
 * KVM_RUN ioctl entry point: sync registers in, execute the guest via
 * __vcpu_run(), translate internal return codes into run exit reasons
 * and sync registers back out.  Returns 0 on a clean exit to userspace,
 * -EINTR on signal, -EINVAL when running a stopped vcpu in
 * user-controlled cpu state mode.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		/* legacy mode: KVM_RUN implicitly starts the vcpu */
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason  = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1413
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (fp/gp registers, psw, prefix, fpc,
 * TOD programmable reg, cpu timer, clock comparator, access and control
 * registers) to guest absolute memory at @gpa.  Returns 0 or -EFAULT.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	/* archmode 1 marks a 64-bit (ESAME) status in byte 163 */
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* individual write errors are OR-ed; any failure yields -EFAULT */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the architected save format stores the clock comparator >> 8 */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1461
/*
 * Store status for a currently loaded vcpu: refresh the software copies
 * of the lazily-kept registers first, then write the save area.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1475
David Hildenbrand8ad35752014-03-14 11:00:21 +01001476static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1477{
1478 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1479 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1480 exit_sie_sync(vcpu);
1481}
1482
1483static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1484{
1485 unsigned int i;
1486 struct kvm_vcpu *vcpu;
1487
1488 kvm_for_each_vcpu(i, vcpu, kvm) {
1489 __disable_ibs_on_vcpu(vcpu);
1490 }
1491}
1492
/*
 * Request that IBS be enabled on @vcpu. Mirrors __disable_ibs_on_vcpu():
 * a pending DISABLE request is consumed first, then ENABLE is queued and
 * the vcpu is kicked out of SIE to process it.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* drop a not-yet-handled DISABLE request, if any */
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	/* force the vcpu to leave SIE and handle the request */
	exit_sie_sync(vcpu);
}
1499
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001500void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1501{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001502 int i, online_vcpus, started_vcpus = 0;
1503
1504 if (!is_vcpu_stopped(vcpu))
1505 return;
1506
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001507 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001508 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001509 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001510 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1511
1512 for (i = 0; i < online_vcpus; i++) {
1513 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1514 started_vcpus++;
1515 }
1516
1517 if (started_vcpus == 0) {
1518 /* we're the only active VCPU -> speed it up */
1519 __enable_ibs_on_vcpu(vcpu);
1520 } else if (started_vcpus == 1) {
1521 /*
1522 * As we are starting a second VCPU, we have to disable
1523 * the IBS facility on all VCPUs to remove potentially
1524 * oustanding ENABLE requests.
1525 */
1526 __disable_ibs_on_all_vcpus(vcpu->kvm);
1527 }
1528
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001529 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001530 /*
1531 * Another VCPU might have used IBS while we were offline.
1532 * Let's play safe and flush the VCPU at startup.
1533 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001534 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001535 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001536 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001537}
1538
1539void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1540{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001541 int i, online_vcpus, started_vcpus = 0;
1542 struct kvm_vcpu *started_vcpu = NULL;
1543
1544 if (is_vcpu_stopped(vcpu))
1545 return;
1546
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001547 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001548 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001549 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001550 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1551
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001552 /* Need to lock access to action_bits to avoid a SIGP race condition */
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001553 spin_lock(&vcpu->arch.local_int.lock);
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001554 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001555
1556 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1557 vcpu->arch.local_int.action_bits &=
1558 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001559 spin_unlock(&vcpu->arch.local_int.lock);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001560
David Hildenbrand8ad35752014-03-14 11:00:21 +01001561 __disable_ibs_on_vcpu(vcpu);
1562
1563 for (i = 0; i < online_vcpus; i++) {
1564 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1565 started_vcpus++;
1566 started_vcpu = vcpu->kvm->vcpus[i];
1567 }
1568 }
1569
1570 if (started_vcpus == 1) {
1571 /*
1572 * As we only have one VCPU left, we want to enable the
1573 * IBS facility for that VCPU to speed it up.
1574 */
1575 __enable_ibs_on_vcpu(started_vcpu);
1576 }
1577
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001578 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001579 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001580}
1581
Cornelia Huckd6712df2012-12-20 15:32:11 +01001582static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1583 struct kvm_enable_cap *cap)
1584{
1585 int r;
1586
1587 if (cap->flags)
1588 return -EINVAL;
1589
1590 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001591 case KVM_CAP_S390_CSS_SUPPORT:
1592 if (!vcpu->kvm->arch.css_support) {
1593 vcpu->kvm->arch.css_support = 1;
1594 trace_kvm_s390_enable_css(vcpu->kvm);
1595 }
1596 r = 0;
1597 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001598 default:
1599 r = -EINVAL;
1600 break;
1601 }
1602 return r;
1603}
1604
/*
 * Dispatch the s390-specific vcpu ioctls (generic ones are handled by the
 * common KVM code before this is reached). Returns the per-ioctl result,
 * -EFAULT on bad user pointers, -ENOTTY for unknown ioctl numbers.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* inject an interrupt into this vcpu */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* memslots are accessed during the store -> srcu read lock */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		/* both directions share the copy-in of the reg descriptor */
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* map user memory into the guest address space (ucontrol VMs only) */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* inverse of KVM_S390_UCAS_MAP */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address fault via the guest address space */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1706
/*
 * Page-fault handler for mmap() on the vcpu file descriptor. Only
 * ucontrol VMs may map anything: the sie control block page at
 * KVM_S390_SIE_PAGE_OFFSET. All other accesses get SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		/* take a reference; the vm subsystem drops it on unmap */
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
1719
/* s390 keeps no per-memslot metadata, so there is nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1725
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001726/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001727int kvm_arch_prepare_memory_region(struct kvm *kvm,
1728 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001729 struct kvm_userspace_memory_region *mem,
1730 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001731{
Nick Wangdd2887e2013-03-25 17:22:57 +01001732 /* A few sanity checks. We can have memory slots which have to be
1733 located/ended at a segment boundary (1MB). The memory in userland is
1734 ok to be fragmented into various different vmas. It is okay to mmap()
1735 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001736
Carsten Otte598841c2011-07-24 10:48:21 +02001737 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001738 return -EINVAL;
1739
Carsten Otte598841c2011-07-24 10:48:21 +02001740 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001741 return -EINVAL;
1742
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001743 return 0;
1744}
1745
1746void kvm_arch_commit_memory_region(struct kvm *kvm,
1747 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001748 const struct kvm_memory_slot *old,
1749 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001750{
Carsten Ottef7850c92011-07-24 10:48:23 +02001751 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001752
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01001753 /* If the basics of the memslot do not change, we do not want
1754 * to update the gmap. Every update causes several unnecessary
1755 * segment translation exceptions. This is usually handled just
1756 * fine by the normal fault handler + gmap, but it will also
1757 * cause faults on the prefix page of running guest CPUs.
1758 */
1759 if (old->userspace_addr == mem->userspace_addr &&
1760 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1761 old->npages * PAGE_SIZE == mem->memory_size)
1762 return;
Carsten Otte598841c2011-07-24 10:48:21 +02001763
1764 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1765 mem->guest_phys_addr, mem->memory_size);
1766 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02001767 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02001768 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001769}
1770
/*
 * Module init: register with the common KVM code and build the list of
 * facilities that will be presented to guests.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* start from the host's STFLE facility list (first 16 bytes) ... */
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	/*
	 * ... then mask it down to the facility bits supported by KVM.
	 * NOTE(review): the exact bit meanings follow the s390 STFLE
	 * facility list definition — confirm against the PoP before
	 * touching these masks.
	 */
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
1793
/* Module teardown: free the facility list page, then unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1799
/* Register the module entry/exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");