blob: 9cf899e9a5d4a0f485eddbf0ed00816c790a9bfd [file] [log] [blame]
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
Heiko Carstensb0c632d2008-03-25 18:47:20 +010041#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
/*
 * Per-vcpu statistics exported through debugfs.  Each entry maps a
 * user-visible counter name to the offset of the corresponding field
 * inside struct kvm_vcpu (via the VCPU_STAT() helper above).  The
 * table is terminated by the { NULL } sentinel.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
100
Michael Mueller78c4b592013-07-26 15:04:04 +0200101unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200102static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100103
/*
 * test availability of vfacility
 *
 * Checks bit @nr in the facility list copied into the module-global
 * vfacilities page at init time.  Returns non-zero if available.
 */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
109
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100110/* Section: not file related */
/* Arch hook invoked when KVM enables virtualization on a CPU; nothing to do. */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
116
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200117static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
118
/*
 * One-time arch setup: register the gmap IPTE invalidation notifier so
 * guest page table invalidations are forwarded to kvm_gmap_notifier().
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
125
/* Undo kvm_arch_hardware_setup(): drop the gmap IPTE notifier. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
130
/* Module-init arch hook; @opaque is unused on s390. */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
136
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100137/* Section: device related */
/* Section: device related */
/*
 * ioctl handler for the /dev/kvm device node.  Only KVM_S390_ENABLE_SIE
 * is supported; it switches the current mm to a SIE-capable layout.
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
145
/*
 * Report support for a KVM capability (KVM_CHECK_EXTENSION).
 * Returns 1/0 for boolean capabilities and a numeric limit for
 * count-style ones (vcpus, memslots).
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write is only reported when the machine has ESOP */
		r = MACHINE_HAS_ESOP;
		break;
	default:
		/* unknown capabilities are reported as unsupported */
		r = 0;
	}
	return r;
}
187
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400188static void kvm_s390_sync_dirty_log(struct kvm *kvm,
189 struct kvm_memory_slot *memslot)
190{
191 gfn_t cur_gfn, last_gfn;
192 unsigned long address;
193 struct gmap *gmap = kvm->arch.gmap;
194
195 down_read(&gmap->mm->mmap_sem);
196 /* Loop over all guest pages */
197 last_gfn = memslot->base_gfn + memslot->npages;
198 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
199 address = gfn_to_hva_memslot(memslot, cur_gfn);
200
201 if (gmap_test_and_clear_dirty(address, gmap))
202 mark_page_dirty(kvm, cur_gfn);
203 }
204 up_read(&gmap->mm->mmap_sem);
205}
206
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100207/* Section: vm related */
208/*
209 * Get (and clear) the dirty memory log for a memory slot.
210 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	/* serialize against memslot changes and concurrent dirty-log readers */
	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull gmap dirty state into the bitmap before copying it out */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
245
/*
 * KVM_ENABLE_CAP on the VM fd: opt the VM in to optional features.
 * No capability defined here takes flags, so any flag is rejected.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		/* route interrupts through the in-kernel flic irqchip */
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		/* forward privileged SIGP orders to userspace */
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
268
/*
 * KVM_GET_DEVICE_ATTR for the KVM_S390_VM_MEM_CTRL group: currently
 * only reads back the guest memory limit (the gmap asce end address).
 */
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
285
/*
 * KVM_SET_DEVICE_ATTR for the KVM_S390_VM_MEM_CTRL group:
 *  - ENABLE_CMMA: turn on collaborative memory management; only
 *    allowed before the first vcpu is created (-EBUSY otherwise).
 *  - CLR_CMMA: reset all CMMA page states for the address space.
 *  - LIMIT_SIZE: replace the gmap with one of the requested (smaller)
 *    size; also only allowed while no vcpus exist.
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* kvm->lock prevents vcpus being created while we check */
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol guests manage their gmaps per-vcpu */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit can only shrink relative to the current asce */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the new gmap, freeing the old one */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
344
Jason J. Herne72f25022014-11-25 09:46:02 -0500345static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
346{
347 u8 gtod_high;
348
349 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
350 sizeof(gtod_high)))
351 return -EFAULT;
352
353 if (gtod_high != 0)
354 return -EINVAL;
355
356 return 0;
357}
358
/*
 * Set the low (64-bit) word of the guest TOD clock.  The guest TOD is
 * kept as an offset (epoch) from the host TOD; recompute the epoch and
 * propagate it into every vcpu's SIE control block.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		/* kick the vcpu out of SIE so the new epoch takes effect */
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
382
383static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
384{
385 int ret;
386
387 if (attr->flags)
388 return -EINVAL;
389
390 switch (attr->attr) {
391 case KVM_S390_VM_TOD_HIGH:
392 ret = kvm_s390_set_tod_high(kvm, attr);
393 break;
394 case KVM_S390_VM_TOD_LOW:
395 ret = kvm_s390_set_tod_low(kvm, attr);
396 break;
397 default:
398 ret = -ENXIO;
399 break;
400 }
401 return ret;
402}
403
404static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
405{
406 u8 gtod_high = 0;
407
408 if (copy_to_user((void __user *)attr->addr, &gtod_high,
409 sizeof(gtod_high)))
410 return -EFAULT;
411
412 return 0;
413}
414
/*
 * Read the low (64-bit) word of the guest TOD clock: the current host
 * TOD plus the stored guest epoch offset.
 */
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}
430
431static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
432{
433 int ret;
434
435 if (attr->flags)
436 return -EINVAL;
437
438 switch (attr->attr) {
439 case KVM_S390_VM_TOD_HIGH:
440 ret = kvm_s390_get_tod_high(kvm, attr);
441 break;
442 case KVM_S390_VM_TOD_LOW:
443 ret = kvm_s390_get_tod_low(kvm, attr);
444 break;
445 default:
446 ret = -ENXIO;
447 break;
448 }
449 return ret;
450}
451
Dominik Dingelf2061652014-04-09 13:13:00 +0200452static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
453{
454 int ret;
455
456 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200457 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100458 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200459 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500460 case KVM_S390_VM_TOD:
461 ret = kvm_s390_set_tod(kvm, attr);
462 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200463 default:
464 ret = -ENXIO;
465 break;
466 }
467
468 return ret;
469}
470
471static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
472{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100473 int ret;
474
475 switch (attr->group) {
476 case KVM_S390_VM_MEM_CTRL:
477 ret = kvm_s390_get_mem_control(kvm, attr);
478 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500479 case KVM_S390_VM_TOD:
480 ret = kvm_s390_get_tod(kvm, attr);
481 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100482 default:
483 ret = -ENXIO;
484 break;
485 }
486
487 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200488}
489
/*
 * KVM_HAS_DEVICE_ATTR on the VM fd: report (0) whether a given
 * group/attribute pair is implemented, without acting on it.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
525
/*
 * Arch-specific ioctls on the VM file descriptor: floating interrupt
 * injection, capability enabling, dummy irqchip creation and the
 * device-attribute triple (set/get/has).
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid after KVM_CAP_S390_IRQCHIP has been enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
591
/*
 * Allocate the crypto control block (crycb) and store its address plus
 * format bit in crycbd for the SIE control block.  Skipped (success)
 * when facility 76 is absent — presumably the crypto/MSA extension
 * gate; confirm against the facility list documentation.
 * GFP_DMA is required because the crycb must be 31-bit addressable.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}
607
/*
 * Create the arch part of a VM: SCA page, debug feature, crypto block,
 * floating interrupt state and (for non-ucontrol guests) the gmap.
 * Resources are unwound in reverse order via the out_* labels.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	/* ucontrol VMs bypass normal memory management: admin only */
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger SCA placement within the page across VMs (16-byte
	 * steps, wrapping at 0x7f0) — kvm_lock serializes the global
	 * sca_offset.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol guests allocate their gmaps per vcpu */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	/* sca was offset within its page; free_page masks back to the page */
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
682
/*
 * Tear down a vcpu: clear pending interrupts and async-pf work,
 * detach it from the SCA, free its gmap (ucontrol) / CMMA state and
 * release the SIE block and vcpu structure.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove the vcpu from the SCA cpu mask and sda entry */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
708
/* Destroy every vcpu of @kvm and reset the online vcpu bookkeeping. */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
724
/* Release all arch VM state allocated by kvm_arch_init_vm(). */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	/* ucontrol VMs never allocated a VM-wide gmap */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
736
737/* Section: vcpu related */
/* Section: vcpu related */
/* Allocate a private, unlimited gmap for a vcpu of a ucontrol VM. */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
747
/*
 * Early per-vcpu init: invalidate the pfault token, drain async-pf
 * work and declare which register sets are synced via kvm_run.
 * ucontrol vcpus additionally get their own gmap.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
764
/*
 * Scheduled in on a host cpu: save host FP/access registers, install
 * the guest's, attach the gmap and mark the vcpu as running.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
776
/*
 * Scheduled out: mirror of kvm_arch_vcpu_load() — clear the running
 * flag, detach the gmap and swap guest registers back for host ones.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
788
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;	/* cpu timer */
	vcpu->arch.sie_block->ckc = 0UL;	/* clock comparator */
	vcpu->arch.sie_block->todpr = 0;	/* TOD programmable register */
	/* Architected reset values for the 16 control registers. */
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* Reset the guest FP control word in the shadow copy and hardware. */
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;		/* breaking-event address */
	vcpu->arch.sie_block->pp = 0;		/* program parameter */
	/* Drop any async pfault state the guest had configured. */
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* Only stop the cpu if user space does not manage the cpu state. */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
811
/*
 * Arch hook run after vcpu creation: inherit the VM-wide TOD epoch
 * difference and, for non-ucontrol guests, the VM-wide gmap.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/* kvm->lock guards the VM-wide epoch value while we copy it. */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	/* ucontrol guests get a per-vcpu gmap elsewhere; others share the VM's. */
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
820
Tony Krowiak5102ee82014-06-27 14:46:01 -0400821static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
822{
823 if (!test_vfacility(76))
824 return;
825
826 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
827}
828
Dominik Dingelb31605c2014-03-25 13:47:11 +0100829void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
830{
831 free_page(vcpu->arch.sie_block->cbrlo);
832 vcpu->arch.sie_block->cbrlo = 0;
833}
834
835int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
836{
837 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
838 if (!vcpu->arch.sie_block->cbrlo)
839 return -ENOMEM;
840
841 vcpu->arch.sie_block->ecb2 |= 0x80;
842 vcpu->arch.sie_block->ecb2 &= ~0x08;
843 return 0;
844}
845
/*
 * One-time setup of a vcpu's SIE control block: cpu flags, execution
 * controls, facility list, intercept controls, CMMA, and the clock
 * comparator wakeup timer.  Bit values for ecb/ecb2/eca are defined by the
 * SIE architecture; see the s390 interpretive-execution documentation.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* Start in z/Arch mode, supervisor state, stopped, with GED. */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* transactional execution support requires facilities 50 and 73 */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* facility list the guest is allowed to see */
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	/* intercept the storage-key and tprot instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer used to wake an idle vcpu when its clock comparator fires */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
882
883struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
884 unsigned int id)
885{
Carsten Otte4d475552011-10-18 12:27:12 +0200886 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200887 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200888 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100889
Carsten Otte4d475552011-10-18 12:27:12 +0200890 if (id >= KVM_MAX_VCPUS)
891 goto out;
892
893 rc = -ENOMEM;
894
Michael Muellerb110fea2013-06-12 13:54:54 +0200895 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100896 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200897 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100898
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200899 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
900 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100901 goto out_free_cpu;
902
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200903 vcpu->arch.sie_block = &sie_page->sie_block;
904 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
905
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100906 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100907 if (!kvm_is_ucontrol(kvm)) {
908 if (!kvm->arch.sca) {
909 WARN_ON_ONCE(1);
910 goto out_free_cpu;
911 }
912 if (!kvm->arch.sca->cpu[id].sda)
913 kvm->arch.sca->cpu[id].sda =
914 (__u64) vcpu->arch.sie_block;
915 vcpu->arch.sie_block->scaoh =
916 (__u32)(((__u64)kvm->arch.sca) >> 32);
917 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
918 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
919 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100920
Carsten Otteba5c1e92008-03-25 18:47:26 +0100921 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100922 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200923 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100924 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100925
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100926 rc = kvm_vcpu_init(vcpu, kvm, id);
927 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800928 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100929 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
930 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200931 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100932
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100933 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800934out_free_sie_block:
935 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100936out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200937 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200938out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100939 return ERR_PTR(rc);
940}
941
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	int has_pending_irq = kvm_s390_vcpu_has_irq(vcpu, 0);

	return has_pending_irq;
}
946
/* Set PROG_BLOCK_SIE so the vcpu cannot (re)enter SIE. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
951
/* Clear PROG_BLOCK_SIE, allowing the vcpu to enter SIE again. */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
956
957/*
958 * Kick a guest cpu out of SIE and wait until SIE is not running.
959 * If the CPU is not running (e.g. waiting as idle) the function will
960 * return immediately. */
961void exit_sie(struct kvm_vcpu *vcpu)
962{
963 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
964 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
965 cpu_relax();
966}
967
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
974
/*
 * gmap invalidation notifier: if the invalidated address covers a vcpu's
 * prefix area, force that vcpu out of SIE and request an MMU reload so the
 * prefix mapping is re-established before the next guest entry.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
990
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
997
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register to user space.
 * Returns 0 on success, -EFAULT on a failed put_user, -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	/* async page fault configuration registers */
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1046
/*
 * KVM_SET_ONE_REG: set a single s390-specific register from user space.
 * Returns 0 on success, -EFAULT on a failed get_user, -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* disabling pfault also discards any queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001097
/* ioctl wrapper around the initial cpu reset; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1103
/* KVM_SET_REGS: copy all 16 general purpose registers from user space. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1109
/* KVM_GET_REGS: copy all 16 general purpose registers to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1115
/*
 * KVM_SET_SREGS: set access and control registers.  The access registers
 * are additionally loaded into the hardware right away.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1124
/* KVM_GET_SREGS: read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1132
/*
 * KVM_SET_FPU: set the guest floating point registers and FP control.
 * The fpc value is validated first; the new state is loaded into the
 * hardware immediately.  Returns -EINVAL for an invalid fpc.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1143
/* KVM_GET_FPU: read back the guest floating point state. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1150
1151static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1152{
1153 int rc = 0;
1154
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001155 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001156 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001157 else {
1158 vcpu->run->psw_mask = psw.mask;
1159 vcpu->run->psw_addr = psw.addr;
1160 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001161 return rc;
1162}
1163
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1169
/* guest-debug flags accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable/disable guest debugging.  Always starts from
 * a clean slate (debug state and breakpoint data cleared), then applies the
 * requested control flags.  Returns -EINVAL for unknown flags and cleans
 * everything up again if importing hardware breakpoints fails.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* undo everything if hardware breakpoint import failed */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1205
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001206int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1207 struct kvm_mp_state *mp_state)
1208{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001209 /* CHECK_STOP and LOAD are not supported yet */
1210 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1211 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001212}
1213
/*
 * KVM_SET_MP_STATE: let user space stop or start the vcpu.  Calling this
 * ioctl hands cpu-state control over to user space permanently.  Returns
 * -ENXIO for unsupported states (LOAD, CHECK_STOP) and anything unknown.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
1238
Dominik Dingelb31605c2014-03-25 13:47:11 +01001239bool kvm_s390_cmma_enabled(struct kvm *kvm)
1240{
1241 if (!MACHINE_IS_LPAR)
1242 return false;
1243 /* only enable for z10 and later */
1244 if (!MACHINE_HAS_EDAT1)
1245 return false;
1246 if (!kvm->arch.use_cmma)
1247 return false;
1248 return true;
1249}
1250
David Hildenbrand8ad35752014-03-14 11:00:21 +01001251static bool ibs_enabled(struct kvm_vcpu *vcpu)
1252{
1253 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1254}
1255
/*
 * Process all pending vcpu requests before (re)entering SIE.  Each handled
 * request restarts the loop via the retry label so that requests raised
 * while processing are not missed.  Returns 0 or a negative error code.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* 0xffff marks the guest TLB as invalid for the next entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1305
Thomas Huthfa576c52014-05-06 17:20:16 +02001306/**
1307 * kvm_arch_fault_in_page - fault-in guest page if necessary
1308 * @vcpu: The corresponding virtual cpu
1309 * @gpa: Guest physical address
1310 * @writable: Whether the page should be writable or not
1311 *
1312 * Make sure that a guest page has been faulted-in on the host.
1313 *
1314 * Return: Zero on success, negative error code otherwise.
1315 */
1316long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001317{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001318 return gmap_fault(vcpu->arch.gmap, gpa,
1319 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001320}
1321
/*
 * Inject an async pfault notification into the guest: PFAULT_INIT
 * (work started) goes to the vcpu as a local interrupt, PFAULT_DONE
 * (work completed) goes to the VM as a floating interrupt.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1338
/* Async-pf hook: tell the guest its page is not yet present (init token). */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1345
/* Async-pf hook: tell the guest its page is now present (done token). */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1352
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1358
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1367
/*
 * Try to set up asynchronous handling for the current guest page fault.
 * Returns 0 when async handling is not possible/allowed (the caller then
 * falls back to synchronous fault-in), otherwise the result of
 * kvm_setup_async_pf().
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault must be configured by the guest */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the PSW must match the guest's configured select/compare masks */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* external interrupts must be deliverable and none already pending */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* cr0 bit 0x200 gates the pfault external interrupt subclass */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* read the 8-byte pfault token from guest real storage */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1396
/*
 * Work that must be done before each SIE entry: async-pf housekeeping,
 * rescheduling, machine check handling, interrupt delivery, pending
 * request processing and guest-debug preparation.
 * Returns 0 when the vcpu may enter SIE, a non-zero code otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* copy gprs 14/15 into the SIE block's gg14/gg15 save area */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests handle interrupt delivery in user space */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1438
/*
 * Work done after each SIE exit.  exit_reason >= 0 is a regular
 * interception; negative values indicate a host-side fault, which is
 * reported to user space (ucontrol), handled as a guest page fault, or -
 * as a last resort - turned into an addressing exception for the guest.
 * Returns 0 to continue the run loop, non-zero to leave it.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* let user space resolve the fault for ucontrol guests */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		/* prefer async handling; otherwise fault the page in now */
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* copy gg14/gg15 back to gprs 14/15 */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1488
/*
 * The main vcpu run loop: pre-run work, SIE entry via sie64a(), post-run
 * work - repeated until an error, a pending signal, or a guest-debug exit.
 * Returns the final rc from the pre/post-run handlers.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs inside SIE */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1523
/*
 * Copy register state that user space marked dirty (kvm_dirty_regs) from
 * the kvm_run structure into the vcpu/SIE block before entering the guest.
 * The PSW is always synced.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* disabling pfault also discards any queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/* all requested syncs are done */
	kvm_run->kvm_dirty_regs = 0;
}
1551
1552static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1553{
1554 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1555 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1556 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1557 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1558 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1559 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1560 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1561 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1562 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1563 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1564 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1565 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1566}
1567
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001568int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1569{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001570 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001571 sigset_t sigsaved;
1572
David Hildenbrand27291e22014-01-23 12:26:52 +01001573 if (guestdbg_exit_pending(vcpu)) {
1574 kvm_s390_prepare_debug_exit(vcpu);
1575 return 0;
1576 }
1577
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001578 if (vcpu->sigset_active)
1579 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1580
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001581 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1582 kvm_s390_vcpu_start(vcpu);
1583 } else if (is_vcpu_stopped(vcpu)) {
1584 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1585 vcpu->vcpu_id);
1586 return -EINVAL;
1587 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001588
David Hildenbrandb028ee32014-07-17 10:47:43 +02001589 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001590
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001591 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001592 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001593
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001594 if (signal_pending(current) && !rc) {
1595 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001596 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001597 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001598
David Hildenbrand27291e22014-01-23 12:26:52 +01001599 if (guestdbg_exit_pending(vcpu) && !rc) {
1600 kvm_s390_prepare_debug_exit(vcpu);
1601 rc = 0;
1602 }
1603
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001604 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001605 /* intercept cannot be handled in-kernel, prepare kvm-run */
1606 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1607 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001608 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1609 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1610 rc = 0;
1611 }
1612
1613 if (rc == -EREMOTE) {
1614 /* intercept was handled, but userspace support is needed
1615 * kvm_run has been prepared by the handler */
1616 rc = 0;
1617 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001618
David Hildenbrandb028ee32014-07-17 10:47:43 +02001619 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001620
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001621 if (vcpu->sigset_active)
1622 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1623
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001624 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001625 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001626}
1627
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01001634int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001635{
Carsten Otte092670c2011-07-24 10:48:22 +02001636 unsigned char archmode = 1;
Michael Muellerfda902c2014-05-13 16:58:30 +02001637 unsigned int px;
Thomas Huth178bd782013-11-13 20:28:18 +01001638 u64 clkcomp;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001639 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001640
Heiko Carstensd0bce602014-01-01 16:45:58 +01001641 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1642 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001643 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001644 gpa = SAVE_AREA_BASE;
1645 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1646 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001647 return -EFAULT;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001648 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1649 }
1650 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1651 vcpu->arch.guest_fpregs.fprs, 128);
1652 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1653 vcpu->run->s.regs.gprs, 128);
1654 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1655 &vcpu->arch.sie_block->gpsw, 16);
Michael Muellerfda902c2014-05-13 16:58:30 +02001656 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01001657 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
Michael Muellerfda902c2014-05-13 16:58:30 +02001658 &px, 4);
Heiko Carstensd0bce602014-01-01 16:45:58 +01001659 rc |= write_guest_abs(vcpu,
1660 gpa + offsetof(struct save_area, fp_ctrl_reg),
1661 &vcpu->arch.guest_fpregs.fpc, 4);
1662 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1663 &vcpu->arch.sie_block->todpr, 4);
1664 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1665 &vcpu->arch.sie_block->cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01001666 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Heiko Carstensd0bce602014-01-01 16:45:58 +01001667 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1668 &clkcomp, 8);
1669 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1670 &vcpu->run->s.regs.acrs, 64);
1671 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1672 &vcpu->arch.sie_block->gcr, 128);
1673 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001674}
1675
Thomas Huthe8798922013-11-06 15:46:33 +01001676int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1677{
1678 /*
1679 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1680 * copying in vcpu load/put. Lets update our copies before we save
1681 * it into the save area
1682 */
1683 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1684 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1685 save_access_regs(vcpu->run->s.regs.acrs);
1686
1687 return kvm_s390_store_status_unloaded(vcpu, addr);
1688}
1689
David Hildenbrand8ad35752014-03-14 11:00:21 +01001690static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1691{
1692 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1693 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1694 exit_sie_sync(vcpu);
1695}
1696
1697static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1698{
1699 unsigned int i;
1700 struct kvm_vcpu *vcpu;
1701
1702 kvm_for_each_vcpu(i, vcpu, kvm) {
1703 __disable_ibs_on_vcpu(vcpu);
1704 }
1705}
1706
1707static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1708{
1709 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1710 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1711 exit_sie_sync(vcpu);
1712}
1713
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001714void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1715{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001716 int i, online_vcpus, started_vcpus = 0;
1717
1718 if (!is_vcpu_stopped(vcpu))
1719 return;
1720
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001721 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001722 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001723 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001724 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1725
1726 for (i = 0; i < online_vcpus; i++) {
1727 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1728 started_vcpus++;
1729 }
1730
1731 if (started_vcpus == 0) {
1732 /* we're the only active VCPU -> speed it up */
1733 __enable_ibs_on_vcpu(vcpu);
1734 } else if (started_vcpus == 1) {
1735 /*
1736 * As we are starting a second VCPU, we have to disable
1737 * the IBS facility on all VCPUs to remove potentially
1738 * oustanding ENABLE requests.
1739 */
1740 __disable_ibs_on_all_vcpus(vcpu->kvm);
1741 }
1742
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001743 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001744 /*
1745 * Another VCPU might have used IBS while we were offline.
1746 * Let's play safe and flush the VCPU at startup.
1747 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001748 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001749 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001750 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001751}
1752
1753void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1754{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001755 int i, online_vcpus, started_vcpus = 0;
1756 struct kvm_vcpu *started_vcpu = NULL;
1757
1758 if (is_vcpu_stopped(vcpu))
1759 return;
1760
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001761 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001762 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001763 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001764 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1765
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001766 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02001767 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001768
David Hildenbrand6cddd432014-10-15 16:48:53 +02001769 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001770 __disable_ibs_on_vcpu(vcpu);
1771
1772 for (i = 0; i < online_vcpus; i++) {
1773 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1774 started_vcpus++;
1775 started_vcpu = vcpu->kvm->vcpus[i];
1776 }
1777 }
1778
1779 if (started_vcpus == 1) {
1780 /*
1781 * As we only have one VCPU left, we want to enable the
1782 * IBS facility for that VCPU to speed it up.
1783 */
1784 __enable_ibs_on_vcpu(started_vcpu);
1785 }
1786
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001787 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001788 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001789}
1790
Cornelia Huckd6712df2012-12-20 15:32:11 +01001791static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1792 struct kvm_enable_cap *cap)
1793{
1794 int r;
1795
1796 if (cap->flags)
1797 return -EINVAL;
1798
1799 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001800 case KVM_CAP_S390_CSS_SUPPORT:
1801 if (!vcpu->kvm->arch.css_support) {
1802 vcpu->kvm->arch.css_support = 1;
1803 trace_kvm_s390_enable_css(vcpu->kvm);
1804 }
1805 r = 0;
1806 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001807 default:
1808 r = -EINVAL;
1809 break;
1810 }
1811 return r;
1812}
1813
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001814long kvm_arch_vcpu_ioctl(struct file *filp,
1815 unsigned int ioctl, unsigned long arg)
1816{
1817 struct kvm_vcpu *vcpu = filp->private_data;
1818 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02001819 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03001820 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001821
Avi Kivity93736622010-05-13 12:35:17 +03001822 switch (ioctl) {
1823 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001824 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02001825 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001826
Avi Kivity93736622010-05-13 12:35:17 +03001827 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001828 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03001829 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02001830 if (s390int_to_s390irq(&s390int, &s390irq))
1831 return -EINVAL;
1832 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03001833 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001834 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001835 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02001836 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001837 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02001838 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001839 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001840 case KVM_S390_SET_INITIAL_PSW: {
1841 psw_t psw;
1842
Avi Kivitybc923cc2010-05-13 12:21:46 +03001843 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001844 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03001845 break;
1846 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1847 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001848 }
1849 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03001850 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1851 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001852 case KVM_SET_ONE_REG:
1853 case KVM_GET_ONE_REG: {
1854 struct kvm_one_reg reg;
1855 r = -EFAULT;
1856 if (copy_from_user(&reg, argp, sizeof(reg)))
1857 break;
1858 if (ioctl == KVM_SET_ONE_REG)
1859 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1860 else
1861 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1862 break;
1863 }
Carsten Otte27e03932012-01-04 10:25:21 +01001864#ifdef CONFIG_KVM_S390_UCONTROL
1865 case KVM_S390_UCAS_MAP: {
1866 struct kvm_s390_ucas_mapping ucasmap;
1867
1868 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1869 r = -EFAULT;
1870 break;
1871 }
1872
1873 if (!kvm_is_ucontrol(vcpu->kvm)) {
1874 r = -EINVAL;
1875 break;
1876 }
1877
1878 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1879 ucasmap.vcpu_addr, ucasmap.length);
1880 break;
1881 }
1882 case KVM_S390_UCAS_UNMAP: {
1883 struct kvm_s390_ucas_mapping ucasmap;
1884
1885 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1886 r = -EFAULT;
1887 break;
1888 }
1889
1890 if (!kvm_is_ucontrol(vcpu->kvm)) {
1891 r = -EINVAL;
1892 break;
1893 }
1894
1895 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1896 ucasmap.length);
1897 break;
1898 }
1899#endif
Carsten Otteccc79102012-01-04 10:25:26 +01001900 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001901 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01001902 break;
1903 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01001904 case KVM_ENABLE_CAP:
1905 {
1906 struct kvm_enable_cap cap;
1907 r = -EFAULT;
1908 if (copy_from_user(&cap, argp, sizeof(cap)))
1909 break;
1910 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1911 break;
1912 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001913 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01001914 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001915 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03001916 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001917}
1918
Carsten Otte5b1c1492012-01-04 10:25:23 +01001919int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1920{
1921#ifdef CONFIG_KVM_S390_UCONTROL
1922 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1923 && (kvm_is_ucontrol(vcpu->kvm))) {
1924 vmf->page = virt_to_page(vcpu->arch.sie_block);
1925 get_page(vmf->page);
1926 return 0;
1927 }
1928#endif
1929 return VM_FAULT_SIGBUS;
1930}
1931
/* No arch-specific per-memslot state is needed on s390. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1937
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001938/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001939int kvm_arch_prepare_memory_region(struct kvm *kvm,
1940 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001941 struct kvm_userspace_memory_region *mem,
1942 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001943{
Nick Wangdd2887e2013-03-25 17:22:57 +01001944 /* A few sanity checks. We can have memory slots which have to be
1945 located/ended at a segment boundary (1MB). The memory in userland is
1946 ok to be fragmented into various different vmas. It is okay to mmap()
1947 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001948
Carsten Otte598841c2011-07-24 10:48:21 +02001949 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001950 return -EINVAL;
1951
Carsten Otte598841c2011-07-24 10:48:21 +02001952 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001953 return -EINVAL;
1954
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001955 return 0;
1956}
1957
1958void kvm_arch_commit_memory_region(struct kvm *kvm,
1959 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001960 const struct kvm_memory_slot *old,
1961 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001962{
Carsten Ottef7850c92011-07-24 10:48:23 +02001963 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001964
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01001965 /* If the basics of the memslot do not change, we do not want
1966 * to update the gmap. Every update causes several unnecessary
1967 * segment translation exceptions. This is usually handled just
1968 * fine by the normal fault handler + gmap, but it will also
1969 * cause faults on the prefix page of running guest CPUs.
1970 */
1971 if (old->userspace_addr == mem->userspace_addr &&
1972 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1973 old->npages * PAGE_SIZE == mem->memory_size)
1974 return;
Carsten Otte598841c2011-07-24 10:48:21 +02001975
1976 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1977 mem->guest_phys_addr, mem->memory_size);
1978 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02001979 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02001980 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001981}
1982
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001983static int __init kvm_s390_init(void)
1984{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001985 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03001986 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001987 if (ret)
1988 return ret;
1989
1990 /*
1991 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001992 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001993 * only set facilities that are known to work in KVM.
1994 */
Michael Mueller78c4b592013-07-26 15:04:04 +02001995 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1996 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001997 kvm_exit();
1998 return -ENOMEM;
1999 }
Michael Mueller78c4b592013-07-26 15:04:04 +02002000 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Christian Borntraeger7be81a42014-09-19 15:55:20 +02002001 vfacilities[0] &= 0xff82fffbf47c2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002002 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02002003 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002004}
2005
2006static void __exit kvm_s390_exit(void)
2007{
Michael Mueller78c4b592013-07-26 15:04:04 +02002008 free_page((unsigned long) vfacilities);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002009 kvm_exit();
2010}
2011
2012module_init(kvm_s390_init);
2013module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002014
2015/*
2016 * Enable autoloading of the kvm module.
2017 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2018 * since x86 takes a different approach.
2019 */
2020#include <linux/miscdevice.h>
2021MODULE_ALIAS_MISCDEV(KVM_MINOR);
2022MODULE_ALIAS("devname:kvm");