/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

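/*
 * Per-vcpu event counters; the table below is consumed by the common KVM
 * debugfs statistics code, which exposes one read-only file per entry.
 */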
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

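/*
 * Facility bits presented to guests via the SIE facility list; allocated and
 * masked down to the facilities KVM actually supports in kvm_s390_init().
 */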
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

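/*
 * Create the architecture specific parts of a VM: the system control area
 * (SCA) used by SIE to track the vcpus, the s390 debug feature buffer used
 * for VM_EVENT tracing, and, unless this is a ucontrol VM, the gmap that
 * backs the guest address space.
 */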
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

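/*
 * Context switch helpers: the host floating point and access registers are
 * saved and the guest copies loaded on vcpu_load, and switched back on
 * vcpu_put. kvm_s390_vcpu_store_status() relies on this lazy scheme.
 */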
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

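/*
 * Called by the gmap IPTE notifier when a watched guest page is unmapped.
 * If the page is a vcpu's prefix page, kick that vcpu out of SIE and let
 * kvm_s390_handle_requests() re-arm the notification before re-entry.
 */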
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

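/* Single register access for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls */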
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Lets just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

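/*
 * Resolve the host page behind the last guest access synchronously; used by
 * vcpu_post_run() when the fault cannot be turned into an async pfault.
 */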
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
        long rc;
        hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        struct mm_struct *mm = current->mm;
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        inti.parm64 = token;

        if (start_token) {
                inti.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to cleanup
         */
        return true;
}

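/*
 * Try to turn the current gmap fault into an asynchronous page fault.
 * Returns 0 (letting the caller fall back to a synchronous fault-in) when
 * pfault is disabled or the guest cannot take the notification right now.
 */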
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

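/*
 * Prepare for a round of SIE: flush completed pfaults, deliver pending
 * machine checks and interrupts, and re-arm the prefix ipte notifier if
 * that was requested.
 */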
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the house keeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

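/*
 * Post-process a SIE exit: a negative exit_reason indicates a host-side
 * fault, which is turned into a ucontrol exit, a pfault resolution, or an
 * addressing exception for the guest; real intercepts go to the handlers.
 */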
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu) ||
                    (kvm_arch_fault_in_sync(vcpu) >= 0))
                        rc = 0;
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

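/*
 * The KVM_RUN ioctl: sync the register state shared with userspace into the
 * SIE block, run the vcpu until an exit that needs userspace, and copy the
 * resulting state back into struct kvm_run.
 */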
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;
        u64 clkcomp;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &clkcomp, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Lets update our copies before we save
         * it into the save area
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. Memory slots have to start and end on a
           segment boundary (1MB). The memory in userland may be fragmented
           into various different vmas. It is okay to mmap() and munmap()
           stuff in this slot after doing this call at any time */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f4fc2000UL;
        vfacilities[1] &= 0x005c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");