/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

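/*
 * Per-vcpu statistics exported through the common KVM stats interface;
 * they typically show up under the kvm debugfs directory
 * (usually /sys/kernel/debug/kvm/).
 */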
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

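/*
 * vfacilities points to a zeroed page that kvm_s390_init() fills with the
 * host STFLE facility list, masked down to the bits KVM can virtualize.
 * It is handed to the guest as its facility list via sie_block->fac.
 */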
/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

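/*
 * Report which optional capabilities are available on this host; userspace
 * queries these with the KVM_CHECK_EXTENSION ioctl.
 */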
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

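/*
 * VM-wide ioctls. A sketch of how userspace typically reaches this path
 * (vm_fd and the surrounding setup are not part of this file):
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = param,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */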
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

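/*
 * Set up the per-VM state: the SCA page (which holds the per-vcpu SIE block
 * pointers), the s390 debug feature log, the floating interrupt list and,
 * unless this is a ucontrol VM, the guest address space (gmap).
 */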
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

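/*
 * Guest and host floating point and access registers are switched lazily
 * here on load/put rather than on every SIE entry/exit; see also
 * kvm_s390_vcpu_store_status(), which has to refresh the copies first.
 */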
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
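	/*
	 * Facilities 50 (constrained transactional execution) and 73
	 * (transactional execution): if the host provides both, let the
	 * guest use transactional execution as well.
	 */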
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

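/*
 * Called by the gmap IPTE notifier when a watched guest page is unmapped.
 * If it is a vcpu's prefix page, kick that vcpu out of SIE and let
 * kvm_s390_handle_requests() re-arm the notifier before re-entry.
 */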
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

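/*
 * s390 specific registers exposed through the KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG vcpu ioctls. A userspace access sketch (vcpu_fd and
 * value are not part of this file):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64) &value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */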
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

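/*
 * Resolve the guest fault address synchronously in the host mm; used by
 * vcpu_post_run() when the fault could not be handed to the guest as an
 * asynchronous pfault.
 */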
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly, but we still
	 * want kvm_check_async_pf_completion() to do the cleanup.
	 */
	return true;
}

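/*
 * Try to turn the current host page fault into an asynchronous pfault for
 * the guest. This only works if the guest has set up the pfault handshake
 * (see the DIAG 0x258 handling) and currently accepts the interrupt;
 * otherwise return 0 and let the caller resolve the fault synchronously.
 */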
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390, notifications for arriving pages will be delivered
	 * directly to the guest, but the housekeeping for completed
	 * pfaults is handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

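/*
 * exit_reason is the return value of sie64a(): a value >= 0 means SIE
 * stopped with an intercept to handle, a negative value indicates a
 * host-side fault while the guest was running (e.g. a gmap page fault).
 */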
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there should
		 * be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

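/*
 * KVM_RUN entry point: sync the guest PSW and any dirty synced registers
 * from kvm_run, loop in __vcpu_run() until an exit has to be handled in
 * userspace or a signal is pending, then copy the state back to kvm_run.
 */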
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland is ok to be
	   fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum number of facility bits. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");