blob: 3e83d4b357b914df6683818341222c0e65fd0d82 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b59f2013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
/*
 * VCPU_STAT(x) expands to the offset of counter @x inside struct
 * kvm_vcpu.stat plus the KVM_STAT_VCPU type tag, the pair expected by
 * the generic KVM debugfs statistics code.
 */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu counters exported through debugfs (one file per entry under
 * the kvm debugfs directory).  The table is NULL-terminated; the
 * counters themselves are incremented from the exit/interrupt handlers
 * elsewhere in this module.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
93
/*
 * Bitmap of the facilities KVM virtualizes for its guests; allocated
 * and populated at module init (not visible in this chunk).
 */
unsigned long *vfacilities;
/* Notifier armed in kvm_arch_hardware_setup() for gmap invalidations. */
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
/*
 * Returns non-zero if virtual facility @nr is set in the vfacilities
 * bitmap, zero otherwise.
 */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
102
/* Section: not file related */
/*
 * Per-cpu enable hook called by common KVM code.  SIE is always
 * available on s390, so there is nothing to switch on here.
 */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* gmap invalidation callback; registered in kvm_arch_hardware_setup(). */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
111
/*
 * One-time arch setup at module load: register the gmap IPTE notifier
 * so guest page invalidations reach kvm_gmap_notifier().
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
118
/* Undo kvm_arch_hardware_setup(): drop the gmap IPTE notifier. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
123
/*
 * Arch init hook from kvm_init().  Only registers the floating
 * interrupt controller (FLIC) device ops; returns its error code.
 */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
129
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100130/* Section: device related */
131long kvm_arch_dev_ioctl(struct file *filp,
132 unsigned int ioctl, unsigned long arg)
133{
134 if (ioctl == KVM_S390_ENABLE_SIE)
135 return s390_enable_sie();
136 return -EINVAL;
137}
138
Alexander Graf784aa3d2014-07-14 18:27:35 +0200139int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100140{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100141 int r;
142
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200143 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100144 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200145 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100146 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100147#ifdef CONFIG_KVM_S390_UCONTROL
148 case KVM_CAP_S390_UCONTROL:
149#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200150 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100151 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200152 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100153 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100154 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200155 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100156 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200157 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200158 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200159 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200160 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200161 case KVM_CAP_MP_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100162 r = 1;
163 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200164 case KVM_CAP_NR_VCPUS:
165 case KVM_CAP_MAX_VCPUS:
166 r = KVM_MAX_VCPUS;
167 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100168 case KVM_CAP_NR_MEMSLOTS:
169 r = KVM_USER_MEM_SLOTS;
170 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200171 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100172 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200173 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200174 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100175 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200176 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100177 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100178}
179
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400180static void kvm_s390_sync_dirty_log(struct kvm *kvm,
181 struct kvm_memory_slot *memslot)
182{
183 gfn_t cur_gfn, last_gfn;
184 unsigned long address;
185 struct gmap *gmap = kvm->arch.gmap;
186
187 down_read(&gmap->mm->mmap_sem);
188 /* Loop over all guest pages */
189 last_gfn = memslot->base_gfn + memslot->npages;
190 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
191 address = gfn_to_hva_memslot(memslot, cur_gfn);
192
193 if (gmap_test_and_clear_dirty(address, gmap))
194 mark_page_dirty(kvm, cur_gfn);
195 }
196 up_read(&gmap->mm->mmap_sem);
197}
198
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100199/* Section: vm related */
200/*
201 * Get (and clear) the dirty memory log for a memory slot.
202 */
203int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
204 struct kvm_dirty_log *log)
205{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400206 int r;
207 unsigned long n;
208 struct kvm_memory_slot *memslot;
209 int is_dirty = 0;
210
211 mutex_lock(&kvm->slots_lock);
212
213 r = -EINVAL;
214 if (log->slot >= KVM_USER_MEM_SLOTS)
215 goto out;
216
217 memslot = id_to_memslot(kvm->memslots, log->slot);
218 r = -ENOENT;
219 if (!memslot->dirty_bitmap)
220 goto out;
221
222 kvm_s390_sync_dirty_log(kvm, memslot);
223 r = kvm_get_dirty_log(kvm, log, &is_dirty);
224 if (r)
225 goto out;
226
227 /* Clear the dirty log */
228 if (is_dirty) {
229 n = kvm_dirty_bitmap_bytes(memslot);
230 memset(memslot->dirty_bitmap, 0, n);
231 }
232 r = 0;
233out:
234 mutex_unlock(&kvm->slots_lock);
235 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100236}
237
Cornelia Huckd938dc52013-10-23 18:26:34 +0200238static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
239{
240 int r;
241
242 if (cap->flags)
243 return -EINVAL;
244
245 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200246 case KVM_CAP_S390_IRQCHIP:
247 kvm->arch.use_irqchip = 1;
248 r = 0;
249 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200250 default:
251 r = -EINVAL;
252 break;
253 }
254 return r;
255}
256
/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group:
 *  - ENABLE_CMMA: turn on collaborative memory management; only allowed
 *    before the first vcpu exists (-EBUSY otherwise).
 *  - CLR_CMMA: reset the page table usage state (PGSTEs) for the whole
 *    address space, under kvm->lock and an SRCU read section.
 * Unknown attributes yield -ENXIO.
 */
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* CMMA may only be enabled while no vcpus are running. */
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		/* Wipe usage state for every PGSTE in the guest mm. */
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
285
Dominik Dingelf2061652014-04-09 13:13:00 +0200286static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
287{
288 int ret;
289
290 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200291 case KVM_S390_VM_MEM_CTRL:
292 ret = kvm_s390_mem_control(kvm, attr);
293 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200294 default:
295 ret = -ENXIO;
296 break;
297 }
298
299 return ret;
300}
301
/* KVM_GET_DEVICE_ATTR (VM scope): no readable attributes exist yet. */
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}
306
307static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
308{
309 int ret;
310
311 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200312 case KVM_S390_VM_MEM_CTRL:
313 switch (attr->attr) {
314 case KVM_S390_VM_MEM_ENABLE_CMMA:
315 case KVM_S390_VM_MEM_CLR_CMMA:
316 ret = 0;
317 break;
318 default:
319 ret = -ENXIO;
320 break;
321 }
322 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200323 default:
324 ret = -ENXIO;
325 break;
326 }
327
328 return ret;
329}
330
/*
 * Dispatcher for arch-specific VM ioctls.  Each case copies its
 * argument structure from userspace (failing with -EFAULT) and hands
 * off to the matching helper; unknown ioctls return -ENOTTY so common
 * code can try its own handlers.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* Inject a floating interrupt into the VM. */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		/*
		 * Only valid after KVM_CAP_S390_IRQCHIP was enabled; the
		 * routing table itself carries no information on s390.
		 */
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
396
Tony Krowiak5102ee82014-06-27 14:46:01 -0400397static int kvm_s390_crypto_init(struct kvm *kvm)
398{
399 if (!test_vfacility(76))
400 return 0;
401
402 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
403 GFP_KERNEL | GFP_DMA);
404 if (!kvm->arch.crypto.crycb)
405 return -ENOMEM;
406
407 kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
408 CRYCB_FORMAT1;
409
410 return 0;
411}
412
Carsten Ottee08b9632012-01-04 10:25:20 +0100413int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100414{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100415 int rc;
416 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100417 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100418
Carsten Ottee08b9632012-01-04 10:25:20 +0100419 rc = -EINVAL;
420#ifdef CONFIG_KVM_S390_UCONTROL
421 if (type & ~KVM_VM_S390_UCONTROL)
422 goto out_err;
423 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
424 goto out_err;
425#else
426 if (type)
427 goto out_err;
428#endif
429
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100430 rc = s390_enable_sie();
431 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100432 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100433
Carsten Otteb2904112011-10-18 12:27:13 +0200434 rc = -ENOMEM;
435
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100436 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
437 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100438 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100439 spin_lock(&kvm_lock);
440 sca_offset = (sca_offset + 16) & 0x7f0;
441 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
442 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100443
444 sprintf(debug_name, "kvm-%u", current->pid);
445
446 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
447 if (!kvm->arch.dbf)
448 goto out_nodbf;
449
Tony Krowiak5102ee82014-06-27 14:46:01 -0400450 if (kvm_s390_crypto_init(kvm) < 0)
451 goto out_crypto;
452
Carsten Otteba5c1e92008-03-25 18:47:26 +0100453 spin_lock_init(&kvm->arch.float_int.lock);
454 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100455 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +0200456 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100457
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100458 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
459 VM_EVENT(kvm, 3, "%s", "vm created");
460
Carsten Ottee08b9632012-01-04 10:25:20 +0100461 if (type & KVM_VM_S390_UCONTROL) {
462 kvm->arch.gmap = NULL;
463 } else {
Christian Borntraeger03499852014-08-25 12:38:57 +0200464 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +0100465 if (!kvm->arch.gmap)
466 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200467 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200468 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100469 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100470
471 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200472 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100473
David Hildenbrand8ad35752014-03-14 11:00:21 +0100474 spin_lock_init(&kvm->arch.start_stop_lock);
475
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100476 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200477out_nogmap:
Tony Krowiak5102ee82014-06-27 14:46:01 -0400478 kfree(kvm->arch.crypto.crycb);
479out_crypto:
Carsten Otte598841c2011-07-24 10:48:21 +0200480 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100481out_nodbf:
482 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100483out_err:
484 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100485}
486
/*
 * Tear down a vcpu: clear pending interrupts and async page faults,
 * detach the vcpu from the SCA (non-ucontrol only), free the per-vcpu
 * gmap (ucontrol only), the CMMA bitmap and the SIE control block,
 * then release the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* Remove this cpu from the SCA cpu mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and drop its SIE block pointer if it is still ours. */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* Make the SCA update visible before the block is freed. */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
512
/*
 * Destroy every vcpu of @kvm, then clear the vcpu array and the online
 * count under kvm->lock so no stale pointers remain.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
528
/*
 * Arch teardown of a VM: vcpus first (they reference the SCA), then
 * SCA page, debug feature, crypto block, gmap (non-ucontrol), adapters
 * and any remaining floating interrupts.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
540
541/* Section: vcpu related */
542int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
543{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200544 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
545 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100546 if (kvm_is_ucontrol(vcpu->kvm)) {
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200547 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
Carsten Otte27e03932012-01-04 10:25:21 +0100548 if (!vcpu->arch.gmap)
549 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200550 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100551 return 0;
552 }
553
Carsten Otte598841c2011-07-24 10:48:21 +0200554 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100555 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
556 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100557 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +0200558 KVM_SYNC_CRS |
559 KVM_SYNC_ARCH0 |
560 KVM_SYNC_PFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100561 return 0;
562}
563
/*
 * Called when the vcpu is scheduled onto a host cpu: save the host FP
 * and access registers, install the guest ones, enable the guest gmap
 * and flag the vcpu as running.  Order matters: host state must be
 * saved before guest state is restored.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
575
/*
 * Inverse of kvm_arch_vcpu_load(): mark the vcpu stopped, disable its
 * gmap, stash the guest FP/access registers and bring the host state
 * back.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
587
/*
 * Bring a vcpu into the architected initial-cpu-reset state (PSW,
 * prefix, timers and control registers zeroed/preset), except that the
 * cpu stays in z/Architecture mode instead of switching to ESA.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the host-side FPC so the guest sees a clean one */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if userspace does not handle cpu state itself */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
610
/* Post-creation hook from common code; nothing to do on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
615
Tony Krowiak5102ee82014-06-27 14:46:01 -0400616static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
617{
618 if (!test_vfacility(76))
619 return;
620
621 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
622}
623
/* Free the CMMA collection block (CBRL origin) of a vcpu, if any. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
629
630int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
631{
632 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
633 if (!vcpu->arch.sie_block->cbrlo)
634 return -ENOMEM;
635
636 vcpu->arch.sie_block->ecb2 |= 0x80;
637 vcpu->arch.sie_block->ecb2 &= ~0x08;
638 return 0;
639}
640
/*
 * Program the SIE control block of a freshly created vcpu: initial
 * cpuflags, execution control bits (gated on facilities 50/73 and
 * SIIF), interception controls, the facility list, optional CMMA
 * state, the clock comparator timer and the reported cpu id.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	/* 0x06: execution control bits - TODO confirm exact bit meaning
	 * against the SIE block layout in kvm_host.h */
	vcpu->arch.sie_block->ecb = 6;
	/* bit 0x10 only if both required facilities (50 and 73) exist */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	/* SIGP interpretation facility available? */
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* intercept the storage-key and TPROT instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* clock comparator wakeup timer */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
675
676struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
677 unsigned int id)
678{
Carsten Otte4d475552011-10-18 12:27:12 +0200679 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200680 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200681 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100682
Carsten Otte4d475552011-10-18 12:27:12 +0200683 if (id >= KVM_MAX_VCPUS)
684 goto out;
685
686 rc = -ENOMEM;
687
Michael Muellerb110fea2013-06-12 13:54:54 +0200688 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100689 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200690 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100691
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200692 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
693 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100694 goto out_free_cpu;
695
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200696 vcpu->arch.sie_block = &sie_page->sie_block;
697 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
698
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100699 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100700 if (!kvm_is_ucontrol(kvm)) {
701 if (!kvm->arch.sca) {
702 WARN_ON_ONCE(1);
703 goto out_free_cpu;
704 }
705 if (!kvm->arch.sca->cpu[id].sda)
706 kvm->arch.sca->cpu[id].sda =
707 (__u64) vcpu->arch.sie_block;
708 vcpu->arch.sie_block->scaoh =
709 (__u32)(((__u64)kvm->arch.sca) >> 32);
710 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
711 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
712 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100713
Carsten Otteba5c1e92008-03-25 18:47:26 +0100714 spin_lock_init(&vcpu->arch.local_int.lock);
715 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
716 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200717 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100718 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100719
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100720 rc = kvm_vcpu_init(vcpu, kvm, id);
721 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800722 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100723 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
724 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200725 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100726
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100727 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800728out_free_sie_block:
729 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100730out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200731 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200732out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100733 return ERR_PTR(rc);
734}
735
/* A vcpu is considered runnable iff an interrupt is pending for it. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	int has_pending_irq = kvm_cpu_has_interrupt(vcpu);

	return has_pending_irq;
}
740
Christian Borntraeger49b99e12013-05-17 14:41:35 +0200741void s390_vcpu_block(struct kvm_vcpu *vcpu)
742{
743 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
744}
745
746void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
747{
748 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
749}
750
751/*
752 * Kick a guest cpu out of SIE and wait until SIE is not running.
753 * If the CPU is not running (e.g. waiting as idle) the function will
754 * return immediately. */
755void exit_sie(struct kvm_vcpu *vcpu)
756{
757 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
758 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
759 cpu_relax();
760}
761
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first, so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
768
/*
 * gmap notifier callback: a watched guest mapping at @address changed.
 * Request a prefix re-map (MMU_RELOAD) on every vcpu whose prefix area
 * covers the address and kick it out of SIE so the request is handled.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		/* masking out bit 0x1000 folds the second prefix page onto the first */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
784
/*
 * Arch hook required by common KVM code; s390 kicks vcpus via exit_sie()
 * instead, so reaching this function indicates a programming error.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
791
/*
 * Copy the register selected by reg->id to the user buffer at reg->addr.
 *
 * Returns 0 on success, the put_user() error (-EFAULT) on a bad user
 * pointer, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		/* TOD programmable register - 32 bit */
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		/* guest/host TOD epoch difference */
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		/* clock comparator */
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		/* pfault token/compare/select: see kvm_arch_setup_async_pf() */
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
840
/*
 * Set the register selected by reg->id from the user buffer at reg->addr.
 * Mirror image of kvm_arch_vcpu_ioctl_get_one_reg().
 *
 * Returns 0 on success, the get_user() error (-EFAULT) on a bad user
 * pointer, or -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		/* TOD programmable register - 32 bit */
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500889
/* Delegate to the initial-reset helper; always reports success. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
895
896int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
897{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +0100898 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100899 return 0;
900}
901
902int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
903{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +0100904 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100905 return 0;
906}
907
/*
 * Set access and control registers. The access registers are also loaded
 * into the hardware right away because they are kept live while the vcpu
 * thread runs (lazy save/restore in vcpu load/put).
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
916
917int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
918 struct kvm_sregs *sregs)
919{
Christian Borntraeger59674c12012-01-11 11:20:33 +0100920 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100921 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100922 return 0;
923}
924
/*
 * Set the guest floating point registers and FP control word.
 * The new fpc is validated *before* any state is touched, so an invalid
 * value leaves the previous guest FP state fully intact.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* make the new values live in hardware right away */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
935
936int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
937{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100938 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
939 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100940 return 0;
941}
942
943static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
944{
945 int rc = 0;
946
David Hildenbrand7a42fdc2014-05-05 16:26:19 +0200947 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100948 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100949 else {
950 vcpu->run->psw_mask = psw.mask;
951 vcpu->run->psw_addr = psw.addr;
952 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100953 return rc;
954}
955
/* Address translation ioctl - unsupported on s390 so far. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
961
/* set of debug flags userspace may pass in dbg->control */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * Enable or disable guest debugging for this vcpu.
 *
 * Debug state is first reset, then re-established according to
 * dbg->control; if importing hardware breakpoints fails, everything is
 * unwound again so the vcpu ends up with debugging fully disabled.
 * Returns 0 on success, -EINVAL for unknown flags, or the error from
 * kvm_s390_import_bp_data().
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean state */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoint data failed - disable debugging again */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
997
/*
 * Report whether the vcpu is stopped or operating.
 *
 * NOTE(review): the state is returned as the function's return value and
 * the mp_state argument is left untouched - verify against the generic
 * KVM_GET_MP_STATE caller that the return value is really consumed this
 * way and not treated as an error code.
 */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}
1005
1006int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1007 struct kvm_mp_state *mp_state)
1008{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001009 int rc = 0;
1010
1011 /* user space knows about this interface - let it control the state */
1012 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1013
1014 switch (mp_state->mp_state) {
1015 case KVM_MP_STATE_STOPPED:
1016 kvm_s390_vcpu_stop(vcpu);
1017 break;
1018 case KVM_MP_STATE_OPERATING:
1019 kvm_s390_vcpu_start(vcpu);
1020 break;
1021 case KVM_MP_STATE_LOAD:
1022 case KVM_MP_STATE_CHECK_STOP:
1023 /* fall through - CHECK_STOP and LOAD are not supported yet */
1024 default:
1025 rc = -ENXIO;
1026 }
1027
1028 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001029}
1030
Dominik Dingelb31605c2014-03-25 13:47:11 +01001031bool kvm_s390_cmma_enabled(struct kvm *kvm)
1032{
1033 if (!MACHINE_IS_LPAR)
1034 return false;
1035 /* only enable for z10 and later */
1036 if (!MACHINE_HAS_EDAT1)
1037 return false;
1038 if (!kvm->arch.use_cmma)
1039 return false;
1040 return true;
1041}
1042
David Hildenbrand8ad35752014-03-14 11:00:21 +01001043static bool ibs_enabled(struct kvm_vcpu *vcpu)
1044{
1045 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1046}
1047
/*
 * Process all pending vcpu requests before (re-)entering SIE.
 * Each handled request restarts the loop, so a request raised while
 * another one is being processed is not lost.
 * Returns 0 on success or the error from gmap_ipte_notify().
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* 0xffff in ihcpu presumably invalidates the cached host
		 * cpu id, forcing a guest TLB flush - TODO confirm */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	/* enable IBS only if it is not set yet (and vice versa below) */
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1097
Thomas Huthfa576c52014-05-06 17:20:16 +02001098/**
1099 * kvm_arch_fault_in_page - fault-in guest page if necessary
1100 * @vcpu: The corresponding virtual cpu
1101 * @gpa: Guest physical address
1102 * @writable: Whether the page should be writable or not
1103 *
1104 * Make sure that a guest page has been faulted-in on the host.
1105 *
1106 * Return: Zero on success, negative error code otherwise.
1107 */
1108long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001109{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001110 return gmap_fault(vcpu->arch.gmap, gpa,
1111 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001112}
1113
/*
 * Inject a pfault INIT (to the vcpu) or DONE (to the VM) interrupt
 * carrying @token as payload.
 *
 * Only .type and .parm64 of the interrupt are initialized here; the
 * injection code is presumably expected to ignore the remaining fields
 * for these interrupt types - TODO confirm.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1128
/* async_pf hook: the page is not present yet - inject a pfault INIT. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1135
/* async_pf hook: the page became available - inject a pfault DONE. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1142
/* async_pf hook: intentionally empty, see comment below. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1148
/* async_pf hook: always true so that completion cleanup still runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1157
/*
 * Try to arrange an asynchronous page fault for the faulting address in
 * current->thread.gmap_addr instead of resolving it synchronously.
 *
 * Returns non-zero when an async pfault was set up, 0 when the caller
 * has to fall back to synchronous fault-in.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handling must have been enabled by the guest ... */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* ... the PSW must match the guest-chosen select/compare mask ... */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* ... the guest must be able to take the completion interrupt ... */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	/* ... and pfault must be enabled for this address space. */
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* fetch the guest's pfault token (8 bytes) for later injection */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1186
/*
 * Everything that has to happen before (re-)entering SIE: async pfault
 * housekeeping, rescheduling, machine check handling, interrupt
 * delivery, request processing and guest-debug setup.
 * Returns 0 when the guest may be entered, an error code otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* 16 bytes starting at gprs[14]: stage r14/r15 into the SIE block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol VMs deliver their interrupts from userspace */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1228
/*
 * Everything that has to happen after leaving SIE with @exit_reason
 * (the return value of sie64a): translate host-side fault conditions,
 * run the in-kernel intercept handlers and decide whether we must go
 * back to userspace (rc < 0), re-enter the guest (rc == 0) or forward
 * an error.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	/* -1 means "not classified yet"; resolved to a real rc below */
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		/* regular SIE exit - handled further below */
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* host fault: let userspace resolve it for ucontrol VMs */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* guest page not mapped on the host - try async, else sync */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		/* unclassified fault while in SIE - report to the guest */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* 16 bytes into gprs[14]: fetch r14/r15 back from the SIE block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1278
/*
 * Main vcpu run loop: alternate between pre-run work, executing the
 * guest via SIE and post-run handling until a signal arrives, a debug
 * exit is pending, or a handler asks to go back to userspace.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs, re-take it afterwards */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1313
/*
 * Pull the register state userspace marked dirty in kvm_run into the
 * vcpu/SIE block before entering the guest. The PSW is always synced;
 * all other groups only when the matching KVM_SYNC_* dirty bit is set.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		/* timing and per-cpu architectural registers */
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	/* everything is consumed now */
	kvm_run->kvm_dirty_regs = 0;
}
1339
1340static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1341{
1342 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1343 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1344 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1345 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1346 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1347 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1348 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1349 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1350 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1351 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1352 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1353 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1354}
1355
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001356int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1357{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001358 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001359 sigset_t sigsaved;
1360
David Hildenbrand27291e22014-01-23 12:26:52 +01001361 if (guestdbg_exit_pending(vcpu)) {
1362 kvm_s390_prepare_debug_exit(vcpu);
1363 return 0;
1364 }
1365
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001366 if (vcpu->sigset_active)
1367 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1368
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001369 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1370 kvm_s390_vcpu_start(vcpu);
1371 } else if (is_vcpu_stopped(vcpu)) {
1372 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1373 vcpu->vcpu_id);
1374 return -EINVAL;
1375 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001376
David Hildenbrandb028ee32014-07-17 10:47:43 +02001377 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001378
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001379 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001380 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001381
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001382 if (signal_pending(current) && !rc) {
1383 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001384 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001385 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001386
David Hildenbrand27291e22014-01-23 12:26:52 +01001387 if (guestdbg_exit_pending(vcpu) && !rc) {
1388 kvm_s390_prepare_debug_exit(vcpu);
1389 rc = 0;
1390 }
1391
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001392 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001393 /* intercept cannot be handled in-kernel, prepare kvm-run */
1394 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1395 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001396 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1397 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1398 rc = 0;
1399 }
1400
1401 if (rc == -EREMOTE) {
1402 /* intercept was handled, but userspace support is needed
1403 * kvm_run has been prepared by the handler */
1404 rc = 0;
1405 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001406
David Hildenbrandb028ee32014-07-17 10:47:43 +02001407 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001408
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001409 if (vcpu->sigset_active)
1410 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1411
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001412 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001413 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001414}
1415
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	/* archmode 1 at absolute byte 163 flags an ESAME status */
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	/* resolve the two magic gpa values to the real save area address */
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* store each component into its slot of struct save_area */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the clock comparator is stored without its low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1463
/*
 * Store the vcpu's register state into the guest save area at @addr.
 * Syncs the lazily-switched FP/access register state back from the host
 * before delegating to kvm_s390_store_status_unloaded().
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1477
/* Request that IBS be disabled on @vcpu and kick it out of SIE. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* clear a still-pending ENABLE request before queueing DISABLE */
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1484
1485static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1486{
1487 unsigned int i;
1488 struct kvm_vcpu *vcpu;
1489
1490 kvm_for_each_vcpu(i, vcpu, kvm) {
1491 __disable_ibs_on_vcpu(vcpu);
1492 }
1493}
1494
/* Request that IBS be enabled on @vcpu and kick it out of SIE. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	/* clear a still-pending DISABLE request before queueing ENABLE */
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1501
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001502void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1503{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001504 int i, online_vcpus, started_vcpus = 0;
1505
1506 if (!is_vcpu_stopped(vcpu))
1507 return;
1508
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001509 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001510 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001511 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001512 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1513
1514 for (i = 0; i < online_vcpus; i++) {
1515 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1516 started_vcpus++;
1517 }
1518
1519 if (started_vcpus == 0) {
1520 /* we're the only active VCPU -> speed it up */
1521 __enable_ibs_on_vcpu(vcpu);
1522 } else if (started_vcpus == 1) {
1523 /*
1524 * As we are starting a second VCPU, we have to disable
1525 * the IBS facility on all VCPUs to remove potentially
1526 * oustanding ENABLE requests.
1527 */
1528 __disable_ibs_on_all_vcpus(vcpu->kvm);
1529 }
1530
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001531 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001532 /*
1533 * Another VCPU might have used IBS while we were offline.
1534 * Let's play safe and flush the VCPU at startup.
1535 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001536 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001537 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001538 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001539}
1540
1541void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1542{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001543 int i, online_vcpus, started_vcpus = 0;
1544 struct kvm_vcpu *started_vcpu = NULL;
1545
1546 if (is_vcpu_stopped(vcpu))
1547 return;
1548
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001549 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001550 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001551 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001552 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1553
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001554 /* Need to lock access to action_bits to avoid a SIGP race condition */
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001555 spin_lock(&vcpu->arch.local_int.lock);
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001556 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001557
1558 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1559 vcpu->arch.local_int.action_bits &=
1560 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001561 spin_unlock(&vcpu->arch.local_int.lock);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001562
David Hildenbrand8ad35752014-03-14 11:00:21 +01001563 __disable_ibs_on_vcpu(vcpu);
1564
1565 for (i = 0; i < online_vcpus; i++) {
1566 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1567 started_vcpus++;
1568 started_vcpu = vcpu->kvm->vcpus[i];
1569 }
1570 }
1571
1572 if (started_vcpus == 1) {
1573 /*
1574 * As we only have one VCPU left, we want to enable the
1575 * IBS facility for that VCPU to speed it up.
1576 */
1577 __enable_ibs_on_vcpu(started_vcpu);
1578 }
1579
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001580 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001581 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001582}
1583
Cornelia Huckd6712df2012-12-20 15:32:11 +01001584static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1585 struct kvm_enable_cap *cap)
1586{
1587 int r;
1588
1589 if (cap->flags)
1590 return -EINVAL;
1591
1592 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001593 case KVM_CAP_S390_CSS_SUPPORT:
1594 if (!vcpu->kvm->arch.css_support) {
1595 vcpu->kvm->arch.css_support = 1;
1596 trace_kvm_s390_enable_css(vcpu->kvm);
1597 }
1598 r = 0;
1599 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001600 default:
1601 r = -EINVAL;
1602 break;
1603 }
1604 return r;
1605}
1606
/*
 * Dispatch arch-specific vcpu ioctls.  Copies any user argument
 * structures in with copy_from_user() and returns -EFAULT on a bad
 * user pointer, -EINVAL on bad parameters, -ENOTTY on unknown ioctls.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* memslots are accessed -> hold the srcu read lock */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	/* ucontrol-only ioctls: manage guest address space mappings directly */
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		/* -ENOTTY is the conventional "unknown ioctl" errno */
		r = -ENOTTY;
	}
	return r;
}
1708
/*
 * Page fault handler for the vcpu fd mmap.  For user-controlled
 * (ucontrol) VMs the SIE control block page is exposed at
 * KVM_S390_SIE_PAGE_OFFSET; all other accesses fault with SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
1721
/*
 * Arch hook for memslot creation: s390 keeps no per-slot arch metadata,
 * so there is nothing to allocate and this always succeeds.
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1727
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001728/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001729int kvm_arch_prepare_memory_region(struct kvm *kvm,
1730 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001731 struct kvm_userspace_memory_region *mem,
1732 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001733{
Nick Wangdd2887e2013-03-25 17:22:57 +01001734 /* A few sanity checks. We can have memory slots which have to be
1735 located/ended at a segment boundary (1MB). The memory in userland is
1736 ok to be fragmented into various different vmas. It is okay to mmap()
1737 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001738
Carsten Otte598841c2011-07-24 10:48:21 +02001739 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001740 return -EINVAL;
1741
Carsten Otte598841c2011-07-24 10:48:21 +02001742 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001743 return -EINVAL;
1744
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001745 return 0;
1746}
1747
/*
 * Arch hook called after a memslot change has been committed: update
 * the guest address space (gmap) mapping for the slot, skipping the
 * update when the slot's layout is unchanged.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		/* void hook: the failure can only be logged, not propagated */
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
1772
/*
 * Module init: register with common KVM code, then build the facility
 * list (vfacilities) that is reported to guests.  On allocation failure
 * the common registration is rolled back via kvm_exit().
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	/* mask the host facility list down to the bits KVM supports */
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
1795
/* Module exit: free the facility page and unregister from common KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1801
/* Module entry/exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");