/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
Heiko Carstensb0c632d2008-03-25 18:47:20 +010041#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020045 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010046 { "exit_validity", VCPU_STAT(exit_validity) },
47 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48 { "exit_external_request", VCPU_STAT(exit_external_request) },
49 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010050 { "exit_instruction", VCPU_STAT(exit_instruction) },
51 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020053 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010054 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010055 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010057 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020058 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010059 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020066 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010067 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68 { "instruction_spx", VCPU_STAT(instruction_spx) },
69 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70 { "instruction_stap", VCPU_STAT(instruction_stap) },
71 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010072 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010073 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020075 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010076 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020078 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010079 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010080 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020081 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010082 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010087 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010088 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020089 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010090 { NULL }
91};
92
Michael Mueller78c4b592013-07-26 15:04:04 +020093unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020094static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010095
Michael Mueller78c4b592013-07-26 15:04:04 +020096/* test availability of vfacility */
Heiko Carstens280ef0f2013-12-17 09:08:28 +010097int test_vfacility(unsigned long nr)
Michael Mueller78c4b592013-07-26 15:04:04 +020098{
99 return __test_facility(nr, (void *) vfacilities);
100}
101
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100102/* Section: not file related */
Alexander Graf10474ae2009-09-15 11:37:46 +0200103int kvm_arch_hardware_enable(void *garbage)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100104{
105 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200106 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100107}
108
109void kvm_arch_hardware_disable(void *garbage)
110{
111}
112
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200113static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
114
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115int kvm_arch_hardware_setup(void)
116{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200117 gmap_notifier.notifier_call = kvm_gmap_notifier;
118 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100119 return 0;
120}
121
122void kvm_arch_hardware_unsetup(void)
123{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200124 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100125}
126
127void kvm_arch_check_processor_compat(void *rtn)
128{
129}
130
131int kvm_arch_init(void *opaque)
132{
133 return 0;
134}
135
136void kvm_arch_exit(void)
137{
138}
139
140/* Section: device related */
141long kvm_arch_dev_ioctl(struct file *filp,
142 unsigned int ioctl, unsigned long arg)
143{
144 if (ioctl == KVM_S390_ENABLE_SIE)
145 return s390_enable_sie();
146 return -EINVAL;
147}
148
Alexander Graf784aa3d2014-07-14 18:27:35 +0200149int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100150{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100151 int r;
152
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200153 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100154 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200155 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100156 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100157#ifdef CONFIG_KVM_S390_UCONTROL
158 case KVM_CAP_S390_UCONTROL:
159#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200160 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100161 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200162 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100163 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100164 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200165 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100166 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200167 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200168 case KVM_CAP_ENABLE_CAP_VM:
Dominik Dingelf2061652014-04-09 13:13:00 +0200169 case KVM_CAP_VM_ATTRIBUTES:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100170 r = 1;
171 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200172 case KVM_CAP_NR_VCPUS:
173 case KVM_CAP_MAX_VCPUS:
174 r = KVM_MAX_VCPUS;
175 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100176 case KVM_CAP_NR_MEMSLOTS:
177 r = KVM_USER_MEM_SLOTS;
178 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200179 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100180 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200181 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200182 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100183 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200184 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100185 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100186}
187
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400188static void kvm_s390_sync_dirty_log(struct kvm *kvm,
189 struct kvm_memory_slot *memslot)
190{
191 gfn_t cur_gfn, last_gfn;
192 unsigned long address;
193 struct gmap *gmap = kvm->arch.gmap;
194
195 down_read(&gmap->mm->mmap_sem);
196 /* Loop over all guest pages */
197 last_gfn = memslot->base_gfn + memslot->npages;
198 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
199 address = gfn_to_hva_memslot(memslot, cur_gfn);
200
201 if (gmap_test_and_clear_dirty(address, gmap))
202 mark_page_dirty(kvm, cur_gfn);
203 }
204 up_read(&gmap->mm->mmap_sem);
205}
206
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100207/* Section: vm related */
208/*
209 * Get (and clear) the dirty memory log for a memory slot.
210 */
211int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
212 struct kvm_dirty_log *log)
213{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400214 int r;
215 unsigned long n;
216 struct kvm_memory_slot *memslot;
217 int is_dirty = 0;
218
219 mutex_lock(&kvm->slots_lock);
220
221 r = -EINVAL;
222 if (log->slot >= KVM_USER_MEM_SLOTS)
223 goto out;
224
225 memslot = id_to_memslot(kvm->memslots, log->slot);
226 r = -ENOENT;
227 if (!memslot->dirty_bitmap)
228 goto out;
229
230 kvm_s390_sync_dirty_log(kvm, memslot);
231 r = kvm_get_dirty_log(kvm, log, &is_dirty);
232 if (r)
233 goto out;
234
235 /* Clear the dirty log */
236 if (is_dirty) {
237 n = kvm_dirty_bitmap_bytes(memslot);
238 memset(memslot->dirty_bitmap, 0, n);
239 }
240 r = 0;
241out:
242 mutex_unlock(&kvm->slots_lock);
243 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100244}
245
Cornelia Huckd938dc52013-10-23 18:26:34 +0200246static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
247{
248 int r;
249
250 if (cap->flags)
251 return -EINVAL;
252
253 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200254 case KVM_CAP_S390_IRQCHIP:
255 kvm->arch.use_irqchip = 1;
256 r = 0;
257 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200258 default:
259 r = -EINVAL;
260 break;
261 }
262 return r;
263}
264
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200265static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
266{
267 int ret;
268 unsigned int idx;
269 switch (attr->attr) {
270 case KVM_S390_VM_MEM_ENABLE_CMMA:
271 ret = -EBUSY;
272 mutex_lock(&kvm->lock);
273 if (atomic_read(&kvm->online_vcpus) == 0) {
274 kvm->arch.use_cmma = 1;
275 ret = 0;
276 }
277 mutex_unlock(&kvm->lock);
278 break;
279 case KVM_S390_VM_MEM_CLR_CMMA:
280 mutex_lock(&kvm->lock);
281 idx = srcu_read_lock(&kvm->srcu);
282 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
283 srcu_read_unlock(&kvm->srcu, idx);
284 mutex_unlock(&kvm->lock);
285 ret = 0;
286 break;
287 default:
288 ret = -ENXIO;
289 break;
290 }
291 return ret;
292}
293
Dominik Dingelf2061652014-04-09 13:13:00 +0200294static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
295{
296 int ret;
297
298 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200299 case KVM_S390_VM_MEM_CTRL:
300 ret = kvm_s390_mem_control(kvm, attr);
301 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200302 default:
303 ret = -ENXIO;
304 break;
305 }
306
307 return ret;
308}
309
310static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
311{
312 return -ENXIO;
313}
314
315static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
316{
317 int ret;
318
319 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200320 case KVM_S390_VM_MEM_CTRL:
321 switch (attr->attr) {
322 case KVM_S390_VM_MEM_ENABLE_CMMA:
323 case KVM_S390_VM_MEM_CLR_CMMA:
324 ret = 0;
325 break;
326 default:
327 ret = -ENXIO;
328 break;
329 }
330 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200331 default:
332 ret = -ENXIO;
333 break;
334 }
335
336 return ret;
337}
338
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100339long kvm_arch_vm_ioctl(struct file *filp,
340 unsigned int ioctl, unsigned long arg)
341{
342 struct kvm *kvm = filp->private_data;
343 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200344 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100345 int r;
346
347 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100348 case KVM_S390_INTERRUPT: {
349 struct kvm_s390_interrupt s390int;
350
351 r = -EFAULT;
352 if (copy_from_user(&s390int, argp, sizeof(s390int)))
353 break;
354 r = kvm_s390_inject_vm(kvm, &s390int);
355 break;
356 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200357 case KVM_ENABLE_CAP: {
358 struct kvm_enable_cap cap;
359 r = -EFAULT;
360 if (copy_from_user(&cap, argp, sizeof(cap)))
361 break;
362 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
363 break;
364 }
Cornelia Huck84223592013-07-15 13:36:01 +0200365 case KVM_CREATE_IRQCHIP: {
366 struct kvm_irq_routing_entry routing;
367
368 r = -EINVAL;
369 if (kvm->arch.use_irqchip) {
370 /* Set up dummy routing. */
371 memset(&routing, 0, sizeof(routing));
372 kvm_set_irq_routing(kvm, &routing, 0, 0);
373 r = 0;
374 }
375 break;
376 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200377 case KVM_SET_DEVICE_ATTR: {
378 r = -EFAULT;
379 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
380 break;
381 r = kvm_s390_vm_set_attr(kvm, &attr);
382 break;
383 }
384 case KVM_GET_DEVICE_ATTR: {
385 r = -EFAULT;
386 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
387 break;
388 r = kvm_s390_vm_get_attr(kvm, &attr);
389 break;
390 }
391 case KVM_HAS_DEVICE_ATTR: {
392 r = -EFAULT;
393 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
394 break;
395 r = kvm_s390_vm_has_attr(kvm, &attr);
396 break;
397 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100398 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300399 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100400 }
401
402 return r;
403}
404
Carsten Ottee08b9632012-01-04 10:25:20 +0100405int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100406{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100407 int rc;
408 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100409 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100410
Carsten Ottee08b9632012-01-04 10:25:20 +0100411 rc = -EINVAL;
412#ifdef CONFIG_KVM_S390_UCONTROL
413 if (type & ~KVM_VM_S390_UCONTROL)
414 goto out_err;
415 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
416 goto out_err;
417#else
418 if (type)
419 goto out_err;
420#endif
421
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100422 rc = s390_enable_sie();
423 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100424 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100425
Carsten Otteb2904112011-10-18 12:27:13 +0200426 rc = -ENOMEM;
427
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100428 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
429 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100430 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100431 spin_lock(&kvm_lock);
432 sca_offset = (sca_offset + 16) & 0x7f0;
433 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
434 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100435
436 sprintf(debug_name, "kvm-%u", current->pid);
437
438 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
439 if (!kvm->arch.dbf)
440 goto out_nodbf;
441
Carsten Otteba5c1e92008-03-25 18:47:26 +0100442 spin_lock_init(&kvm->arch.float_int.lock);
443 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100444 init_waitqueue_head(&kvm->arch.ipte_wq);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100445
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100446 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
447 VM_EVENT(kvm, 3, "%s", "vm created");
448
Carsten Ottee08b9632012-01-04 10:25:20 +0100449 if (type & KVM_VM_S390_UCONTROL) {
450 kvm->arch.gmap = NULL;
451 } else {
452 kvm->arch.gmap = gmap_alloc(current->mm);
453 if (!kvm->arch.gmap)
454 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200455 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200456 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100457 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100458
459 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200460 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100461
David Hildenbrand8ad35752014-03-14 11:00:21 +0100462 spin_lock_init(&kvm->arch.start_stop_lock);
463
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100464 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200465out_nogmap:
466 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100467out_nodbf:
468 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100469out_err:
470 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100471}
472
Christian Borntraegerd329c032008-11-26 14:50:27 +0100473void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
474{
475 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200476 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100477 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200478 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100479 if (!kvm_is_ucontrol(vcpu->kvm)) {
480 clear_bit(63 - vcpu->vcpu_id,
481 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
482 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
483 (__u64) vcpu->arch.sie_block)
484 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
485 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200486 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100487
488 if (kvm_is_ucontrol(vcpu->kvm))
489 gmap_free(vcpu->arch.gmap);
490
Dominik Dingelb31605c2014-03-25 13:47:11 +0100491 if (kvm_s390_cmma_enabled(vcpu->kvm))
492 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100493 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200494
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100495 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200496 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100497}
498
499static void kvm_free_vcpus(struct kvm *kvm)
500{
501 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300502 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100503
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300504 kvm_for_each_vcpu(i, vcpu, kvm)
505 kvm_arch_vcpu_destroy(vcpu);
506
507 mutex_lock(&kvm->lock);
508 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
509 kvm->vcpus[i] = NULL;
510
511 atomic_set(&kvm->online_vcpus, 0);
512 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100513}
514
void kvm_arch_sync_events(struct kvm *kvm)
{
        /* no asynchronous events to flush on s390 */
}
518
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100519void kvm_arch_destroy_vm(struct kvm *kvm)
520{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100521 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100522 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100523 debug_unregister(kvm->arch.dbf);
Carsten Otte27e03932012-01-04 10:25:21 +0100524 if (!kvm_is_ucontrol(kvm))
525 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200526 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100527 kvm_s390_clear_float_irqs(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100528}
529
530/* Section: vcpu related */
531int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
532{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200533 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
534 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100535 if (kvm_is_ucontrol(vcpu->kvm)) {
536 vcpu->arch.gmap = gmap_alloc(current->mm);
537 if (!vcpu->arch.gmap)
538 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200539 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100540 return 0;
541 }
542
Carsten Otte598841c2011-07-24 10:48:21 +0200543 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100544 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
545 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100546 KVM_SYNC_ACRS |
547 KVM_SYNC_CRS;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100548 return 0;
549}
550
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing todo */
}
555
556void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
557{
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200558 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
559 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100560 save_access_regs(vcpu->arch.host_acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200561 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
562 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100563 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200564 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100565 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100566}
567
568void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
569{
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100570 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200571 gmap_disable(vcpu->arch.gmap);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200572 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
573 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100574 save_access_regs(vcpu->run->s.regs.acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200575 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
576 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100577 restore_access_regs(vcpu->arch.host_acrs);
578}
579
580static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
581{
582 /* this equals initial cpu reset in pop, but we don't switch to ESA */
583 vcpu->arch.sie_block->gpsw.mask = 0UL;
584 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +0100585 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100586 vcpu->arch.sie_block->cputm = 0UL;
587 vcpu->arch.sie_block->ckc = 0UL;
588 vcpu->arch.sie_block->todpr = 0;
589 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
590 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
591 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
592 vcpu->arch.guest_fpregs.fpc = 0;
593 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
594 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100595 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +0200596 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
597 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6852d7b2014-03-14 10:59:29 +0100598 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +0100599 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100600}
601
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /* no post-creation work needed on s390 */
        return 0;
}
606
Dominik Dingelb31605c2014-03-25 13:47:11 +0100607void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
608{
609 free_page(vcpu->arch.sie_block->cbrlo);
610 vcpu->arch.sie_block->cbrlo = 0;
611}
612
613int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
614{
615 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
616 if (!vcpu->arch.sie_block->cbrlo)
617 return -ENOMEM;
618
619 vcpu->arch.sie_block->ecb2 |= 0x80;
620 vcpu->arch.sie_block->ecb2 &= ~0x08;
621 return 0;
622}
623
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100624int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
625{
Dominik Dingelb31605c2014-03-25 13:47:11 +0100626 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200627
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100628 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
629 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200630 CPUSTAT_STOPPED |
631 CPUSTAT_GED);
Christian Borntraegerfc345312010-06-17 23:16:20 +0200632 vcpu->arch.sie_block->ecb = 6;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200633 if (test_vfacility(50) && test_vfacility(73))
634 vcpu->arch.sie_block->ecb |= 0x10;
635
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200636 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrand49539192014-02-21 08:59:59 +0100637 vcpu->arch.sie_block->eca = 0xD1002000U;
Heiko Carstens217a4402013-12-30 12:54:14 +0100638 if (sclp_has_siif())
639 vcpu->arch.sie_block->eca |= 1;
Michael Mueller78c4b592013-07-26 15:04:04 +0200640 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
Matthew Rosato5a5e6532013-01-29 11:48:20 -0500641 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
642 ICTL_TPROT;
643
Dominik Dingelb31605c2014-03-25 13:47:11 +0100644 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
645 rc = kvm_s390_vcpu_setup_cmma(vcpu);
646 if (rc)
647 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200648 }
Christian Borntraegerca872302009-05-12 17:21:49 +0200649 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
650 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
651 (unsigned long) vcpu);
652 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Christian Borntraeger453423d2008-03-25 18:47:29 +0100653 get_cpu_id(&vcpu->arch.cpu_id);
Christian Borntraeger92e6ecf2009-03-26 15:23:58 +0100654 vcpu->arch.cpu_id.version = 0xff;
Dominik Dingelb31605c2014-03-25 13:47:11 +0100655 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100656}
657
658struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
659 unsigned int id)
660{
Carsten Otte4d475552011-10-18 12:27:12 +0200661 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200662 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200663 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100664
Carsten Otte4d475552011-10-18 12:27:12 +0200665 if (id >= KVM_MAX_VCPUS)
666 goto out;
667
668 rc = -ENOMEM;
669
Michael Muellerb110fea2013-06-12 13:54:54 +0200670 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100671 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200672 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100673
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200674 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
675 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100676 goto out_free_cpu;
677
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200678 vcpu->arch.sie_block = &sie_page->sie_block;
679 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
680
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100681 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100682 if (!kvm_is_ucontrol(kvm)) {
683 if (!kvm->arch.sca) {
684 WARN_ON_ONCE(1);
685 goto out_free_cpu;
686 }
687 if (!kvm->arch.sca->cpu[id].sda)
688 kvm->arch.sca->cpu[id].sda =
689 (__u64) vcpu->arch.sie_block;
690 vcpu->arch.sie_block->scaoh =
691 (__u32)(((__u64)kvm->arch.sca) >> 32);
692 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
693 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
694 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100695
Carsten Otteba5c1e92008-03-25 18:47:26 +0100696 spin_lock_init(&vcpu->arch.local_int.lock);
697 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
698 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200699 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100700 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100701
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100702 rc = kvm_vcpu_init(vcpu, kvm, id);
703 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800704 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100705 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
706 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200707 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100708
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100709 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800710out_free_sie_block:
711 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100712out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200713 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200714out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100715 return ERR_PTR(rc);
716}
717
/*
 * A vcpu is considered runnable by common KVM code when it has an
 * interrupt pending for delivery.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}
722
/* Set PROG_BLOCK_SIE in the SIE prog20 field to prevent SIE (re-)entry. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
727
/* Clear PROG_BLOCK_SIE again so the vcpu may enter SIE. */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
732
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* Request a stop intercept, then busy-wait for SIE to leave. */
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
743
/*
 * Kick a guest cpu out of SIE and prevent SIE-reentry.
 * The caller is expected to unblock the vcpu again once done.
 */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
750
/*
 * gmap notifier callback: invalidation of @address in the guest mapping.
 * If the address falls into a vcpu's (two-page) prefix area, force that
 * vcpu to reload its MMU state before re-entering SIE.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages (mask off the 4k page bit) */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
766
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
773
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register out to user space.
 * Returns 0 on success, -EFAULT on a failing put_user, -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
822
/*
 * KVM_SET_ONE_REG: counterpart of the get_one_reg handler above; copies a
 * single s390-specific register in from user space. Same return values.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500871
/* KVM_S390_INITIAL_RESET: put the vcpu back into its initial state. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
877
/* Copy all 16 general purpose registers from user space into the run area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
883
/* Copy all 16 general purpose registers out of the run area to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
889
/*
 * Set access and control registers. The access registers are loaded into
 * the host immediately since they are kept lazily in hardware.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
898
/* Read back the guest access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
906
/*
 * Set guest floating point registers. The new FP control word is validated
 * first; -EINVAL is returned for an illegal fpc value. The registers are
 * also loaded into the host because they are handled lazily.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
917
/* Read back the guest floating point registers and control word. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
924
/*
 * Set the initial PSW. Only allowed while the vcpu is in the STOPPED
 * state; returns -EBUSY otherwise.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
937
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
943
/* Guest-debug control flags that user space may legally pass in. */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable or disable guest debugging.
 * Any previous debug state is dropped first. Enabling forces the guest
 * into PER (Program Event Recording) mode; hardware breakpoints are
 * imported when requested. On any failure all debug state is rolled back.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* roll back on import failure */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
979
/* MP state is not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
985
/* MP state is not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
991
Dominik Dingelb31605c2014-03-25 13:47:11 +0100992bool kvm_s390_cmma_enabled(struct kvm *kvm)
993{
994 if (!MACHINE_IS_LPAR)
995 return false;
996 /* only enable for z10 and later */
997 if (!MACHINE_HAS_EDAT1)
998 return false;
999 if (!kvm->arch.use_cmma)
1000 return false;
1001 return true;
1002}
1003
/* Is the Interpretive-execution Branch Subcapability (IBS) flag set? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1008
/*
 * Process pending vcpu requests before (re-)entering SIE. Each handled
 * request restarts the loop so that requests raced in meanwhile are seen.
 * Returns 0 when SIE may be entered, or a negative error code.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	/* Enable IBS only if it is not already active. */
	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* Disable IBS only if it is currently active. */
	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	return 0;
}
1050
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	/* translate the guest address to a host virtual address */
	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	/* fault in a single page; positive rc (pages pinned) means success */
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}
1076
/*
 * Inject a pfault token interrupt. INIT tokens go to the vcpu directly,
 * DONE tokens are injected as floating interrupts into the VM.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1091
/* Async page fault started: tell the guest via a PFAULT_INIT token. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1098
/* Async page fault resolved: tell the guest via a PFAULT_DONE token. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1105
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1111
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1120
/*
 * Try to set up an async page fault for the current guest fault address.
 * Returns nonzero if the async pf was armed (the caller may then resume
 * the guest), 0 if the fault must be resolved synchronously instead.
 * The guards check that pfault handshaking is enabled and that the guest
 * is currently able to take the PFAULT_INIT external interrupt.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	/* NOTE(review): gcr[0] bit presumably gates pfault ext irq subclass */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1149
/*
 * Work done on every entry into SIE: async-pf housekeeping, rescheduling,
 * machine check handling, interrupt delivery, request processing and
 * guest-debug preparation. Returns 0 when the guest may be entered.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* stash r14/r15 in the SIE block for use by the SIE entry code */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1188
/*
 * Work done after every exit from SIE. @exit_reason is the sie64a return
 * value: >= 0 means a regular intercept, < 0 means the host faulted.
 * Returns 0 to continue the run loop, -EOPNOTSUPP/-EREMOTE to exit to
 * user space, or a negative error code.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol: hand the translation fault to user space */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* guest page fault: try async pf first, else fault in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* restore r14/r15 from the SIE block */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1238
/*
 * The main vcpu run loop: pre-run work, SIE entry, post-run handling,
 * repeated until a signal is pending, a debug exit is requested or an
 * error / user-space exit code is produced.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1273
/*
 * KVM_RUN: synchronize register state from kvm_run, execute the guest via
 * __vcpu_run(), then translate the result into a kvm_run exit reason and
 * copy register state back for user space.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	kvm_s390_vcpu_start(vcpu);

	/* sanity check: only exit reasons we can re-enter from are valid */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
	case KVM_EXIT_DEBUG:
		break;
	default:
		BUG();
	}

	/* sync dirty register state from user space into the SIE block */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* copy register state back to user space */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1353
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * Writes the architected save area (fp/gp regs, psw, prefix, fpc, todpr,
 * cpu timer, clock comparator, access and control regs) to guest memory.
 * Returns 0 on success or -EFAULT if any guest write fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* clock comparator is stored without its low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1401
/*
 * Store-status entry point for a loaded vcpu: refresh our software copies
 * of the lazily-handled registers, then write the save area.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1415
/* Is the vcpu currently in the STOPPED state? */
static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
}
1420
/* Request IBS off for one vcpu, cancelling any pending enable request. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1427
/* Request IBS off for every vcpu of the VM. */
static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}
1437
/* Request IBS on for one vcpu, cancelling any pending disable request. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1444
/*
 * Move a vcpu from the STOPPED to the running state, managing the IBS
 * facility: a single running vcpu gets IBS enabled as a speed-up; starting
 * a second vcpu forces IBS off everywhere.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count how many vcpus are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	return;
}
1483
/*
 * Move a vcpu into the STOPPED state. If exactly one vcpu remains running
 * afterwards, turn IBS on for it as a speed-up.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find the remaining running vcpus */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	return;
}
1518
Cornelia Huckd6712df2012-12-20 15:32:11 +01001519static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1520 struct kvm_enable_cap *cap)
1521{
1522 int r;
1523
1524 if (cap->flags)
1525 return -EINVAL;
1526
1527 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001528 case KVM_CAP_S390_CSS_SUPPORT:
1529 if (!vcpu->kvm->arch.css_support) {
1530 vcpu->kvm->arch.css_support = 1;
1531 trace_kvm_s390_enable_css(vcpu->kvm);
1532 }
1533 r = 0;
1534 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001535 default:
1536 r = -EINVAL;
1537 break;
1538 }
1539 return r;
1540}
1541
/*
 * Dispatcher for the s390-specific VCPU ioctls.
 * Returns 0 or a positive value on success, a negative errno on failure,
 * and -ENOTTY for unknown ioctl numbers (per ioctl convention).
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* Storing status accesses guest memory -> hold the SRCU lock. */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* UCAS mappings are only valid for user-controlled VMs. */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* UCAS mappings are only valid for user-controlled VMs. */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* arg is the guest address to fault in via the gmap. */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1645
/*
 * Page-fault handler for mmap()ed VCPU file descriptors. Only the SIE
 * control block page of user-controlled VMs may be mapped; everything
 * else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		/* Take a reference; the core mm drops it when unmapping. */
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
1658
/* s390 keeps no per-memslot arch data, so there is nothing to free. */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
1663
/* s390 keeps no per-memslot arch data; creation always succeeds. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1669
/* No arch-specific action is needed when the memslot array changes. */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
1673
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001674/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001675int kvm_arch_prepare_memory_region(struct kvm *kvm,
1676 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001677 struct kvm_userspace_memory_region *mem,
1678 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001679{
Nick Wangdd2887e2013-03-25 17:22:57 +01001680 /* A few sanity checks. We can have memory slots which have to be
1681 located/ended at a segment boundary (1MB). The memory in userland is
1682 ok to be fragmented into various different vmas. It is okay to mmap()
1683 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001684
Carsten Otte598841c2011-07-24 10:48:21 +02001685 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001686 return -EINVAL;
1687
Carsten Otte598841c2011-07-24 10:48:21 +02001688 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001689 return -EINVAL;
1690
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001691 return 0;
1692}
1693
/*
 * Commit a memory-region change by (re)mapping it into the guest's gmap.
 * Failures are only logged: this hook returns void, so the mapping error
 * cannot be propagated to the caller.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
1718
/* s390 has no shadow page tables to flush. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
1722
/* s390 has no shadow page tables to flush for a single memslot. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1727
/*
 * Module init: register with the generic KVM core and build the mask of
 * virtualized facilities (vfacilities) that KVM advertises to guests.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		/* Undo the kvm_init() above before bailing out. */
		kvm_exit();
		return -ENOMEM;
	}
	/* Start from the host's STFLE facility list (first 16 bytes)... */
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	/* ...then keep only the facility bits KVM knows how to virtualize.
	 * NOTE(review): exact bit semantics come from the s390 STFLE
	 * facility-list definition — verify against the PoP when changing. */
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
1750
/* Module teardown: release the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1756
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");