blob: 83b79447de5528171c9e86c7ec61c21e7f8310d5 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010014 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020019#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010020#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010025#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010026#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010027#include <asm/lowcore.h>
28#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010029#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010030#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020031#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020032#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010033#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010034#include "gaccess.h"
35
Cornelia Huck5786fff2012-07-23 17:20:29 +020036#define CREATE_TRACE_POINTS
37#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020038#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020039
Heiko Carstensb0c632d2008-03-25 18:47:20 +010040#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
41
/*
 * Per-VCPU statistics counters exported through debugfs; each entry maps a
 * debugfs file name to an offset into struct kvm_vcpu::stat (via VCPU_STAT).
 * The list is terminated by the { NULL } sentinel.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
87
Michael Mueller78c4b592013-07-26 15:04:04 +020088unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020089static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010090
/*
 * test_vfacility - test availability of a virtual facility
 * @nr: facility bit number to test
 *
 * Checks bit @nr in the vfacilities bitmap (the facility list exposed to
 * guests). Returns non-zero if the facility is available.
 */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
96
Heiko Carstensb0c632d2008-03-25 18:47:20 +010097/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
103
/* Nothing to undo: hardware_enable is a no-op on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
107
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200108static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
109
/*
 * Register the gmap IPTE notifier so kvm_gmap_notifier() is called when a
 * guest mapping with an attached notifier bit is invalidated.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
116
/* Tear down what kvm_arch_hardware_setup() registered. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
121
/* No per-CPU compatibility constraints on s390; nothing to report via *rtn. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
125
/* No architecture-wide initialization needed beyond module init. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
130
/* Counterpart of kvm_arch_init(); nothing to release. */
void kvm_arch_exit(void)
{
}
134
135/* Section: device related */
/* Section: device related */
/*
 * Device ioctl on /dev/kvm. The only arch-specific request supported is
 * KVM_S390_ENABLE_SIE, which switches the calling process's mm to a layout
 * usable by SIE; everything else is rejected with -EINVAL.
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
143
/*
 * Report which KVM capabilities this architecture supports.
 *
 * Returns 1 for plainly supported capabilities, a count/limit for the
 * VCPU/memslot queries, MACHINE_HAS_ESOP for KVM_CAP_S390_COW, and 0 for
 * everything unknown.
 */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write support depends on the ESOP machine facility */
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
179
180/* Section: vm related */
181/*
182 * Get (and clear) the dirty memory log for a memory slot.
183 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; report success with an
 * empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
189
/*
 * Arch-specific VM ioctls. Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is handled here; unknown requests return -ENOTTY
 * so common code/userspace can distinguish them from genuine failures.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
213
/*
 * Architecture part of VM creation.
 *
 * Validates the requested VM @type, enables SIE for the current mm,
 * allocates the system control area (SCA) and the debug feature, and sets
 * up the guest address space (gmap) unless this is a user-controlled
 * (UCONTROL) VM, which manages its own gmaps per VCPU.
 *
 * Returns 0 on success or a negative error code; on error all partially
 * acquired resources are released via the goto chain at the bottom.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	/* rotated between VMs so SCAs of different guests land in different
	 * cache lines of the (shared) zeroed page region */
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	/* UCONTROL VMs bypass normal memory management - root only */
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/* advance the shared offset under kvm_lock and shift this VM's SCA
	 * inside its page; free_page() later masks the offset away again */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* UCONTROL: each VCPU allocates its own gmap later */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
277
/*
 * Free a VCPU: cancel pending async page faults, detach the VCPU from the
 * SCA (non-UCONTROL only), free the per-VCPU gmap (UCONTROL only), and
 * release the SIE block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this VCPU from the SCA: clear its mcn bit and its
		 * sda entry (only if it still points at our SIE block) */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
299
/*
 * Destroy all VCPUs of a VM, then clear the vcpus array and the online
 * count under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
315
/* No asynchronous arch events to flush before VM destruction on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
319
/*
 * Architecture part of VM teardown: releases everything acquired in
 * kvm_arch_init_vm() (VCPUs, SCA page, debug feature, gmap).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
328
329/* Section: vcpu related */
/* Section: vcpu related */
/*
 * Common VCPU init hook. Resets async-pf state; for UCONTROL VMs each VCPU
 * gets its own gmap, otherwise the VM-wide gmap is shared. Also announces
 * which register sets are mirrored through the kvm_run sync area.
 *
 * Returns 0 on success, -ENOMEM if the per-VCPU gmap cannot be allocated.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
349
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
354
/*
 * Called when a VCPU is scheduled in on a host CPU: save the host FP and
 * access registers, load the guest's, enable the guest address space and
 * mark the VCPU as running. Order matters: host state must be saved before
 * guest state is restored.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
366
/*
 * Called when a VCPU is scheduled out: the exact mirror of
 * kvm_arch_vcpu_load() - clear the running flag, disable the guest address
 * space, save the guest FP/access registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
378
/*
 * Perform the architected initial CPU reset on a VCPU: clear PSW, prefix,
 * timers, control registers (then set the architected reset values of
 * cr0/cr14), FP control and various SIE fields, cancel pending async page
 * faults and local interrupts, and mark the CPU stopped.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	kvm_s390_clear_local_irqs(vcpu);
}
400
/* No post-creation work needed on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
405
/*
 * Initialize the SIE control block of a freshly created VCPU: CPU state
 * flags, execution-control bits (ecb/ecb2/eca), the facility list pointer,
 * the clock-comparator timer/tasklet and the CPU id presented to the guest.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* enable transactional execution interpretation only if both the
	 * TX facility (73) and its constrained variant prereq (50) exist */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* version 0xff tells the guest this is a virtual CPU */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
427
428struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
429 unsigned int id)
430{
Carsten Otte4d475552011-10-18 12:27:12 +0200431 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200432 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200433 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100434
Carsten Otte4d475552011-10-18 12:27:12 +0200435 if (id >= KVM_MAX_VCPUS)
436 goto out;
437
438 rc = -ENOMEM;
439
Michael Muellerb110fea2013-06-12 13:54:54 +0200440 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100441 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200442 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100443
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200444 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
445 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100446 goto out_free_cpu;
447
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200448 vcpu->arch.sie_block = &sie_page->sie_block;
449 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
450
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100451 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100452 if (!kvm_is_ucontrol(kvm)) {
453 if (!kvm->arch.sca) {
454 WARN_ON_ONCE(1);
455 goto out_free_cpu;
456 }
457 if (!kvm->arch.sca->cpu[id].sda)
458 kvm->arch.sca->cpu[id].sda =
459 (__u64) vcpu->arch.sie_block;
460 vcpu->arch.sie_block->scaoh =
461 (__u32)(((__u64)kvm->arch.sca) >> 32);
462 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
463 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
464 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100465
Carsten Otteba5c1e92008-03-25 18:47:26 +0100466 spin_lock_init(&vcpu->arch.local_int.lock);
467 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
468 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200469 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100470 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100471
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100472 rc = kvm_vcpu_init(vcpu, kvm, id);
473 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800474 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100475 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
476 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200477 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100478
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100479 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800480out_free_sie_block:
481 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100482out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200483 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200484out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100485 return ERR_PTR(rc);
486}
487
/* A VCPU is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}
492
/* Prevent the VCPU from (re-)entering SIE by setting the block bit. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
497
/* Allow the VCPU to enter SIE again by clearing the block bit. */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
502
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the SIE interception handler clears PROG_IN_SIE */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
513
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* set the block bit first so the VCPU cannot re-enter SIE */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
520
/*
 * gmap IPTE invalidation callback: if @address covers a VCPU's prefix
 * area, request an MMU reload for that VCPU and force it out of SIE so
 * the prefix pages get re-pinned before the next guest entry.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
536
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
543
/*
 * KVM_GET_ONE_REG: copy a single arch register (SIE block field or async-pf
 * parameter) to the user buffer at reg->addr. Returns 0 on success,
 * -EFAULT from put_user() on a bad pointer, -EINVAL for unknown ids.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
592
/*
 * KVM_SET_ONE_REG: the write-side counterpart of
 * kvm_arch_vcpu_ioctl_get_one_reg() - same register ids, same error
 * conventions, using get_user() to read the new value from userspace.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500641
/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial CPU reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
647
/* Copy the general purpose registers from userspace into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
653
/* Copy the general purpose registers from the sync area to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
659
/*
 * Set access and control registers; the access registers are also loaded
 * into the hardware immediately so they take effect on the next guest entry.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
668
/* Read the access and control registers back out to userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
676
/*
 * Set the guest floating point registers and FP control register.
 * The fpc value is validated first (-EINVAL on reserved bits); both are
 * loaded into the hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
687
/* Read the guest floating point registers and FP control register. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
694
/*
 * Set the initial PSW through the kvm_run area. Only allowed while the
 * VCPU is in the stopped state; returns -EBUSY otherwise.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
707
/* Address translation ioctl: unsupported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
713
Jan Kiszkad0bfb942008-12-15 13:52:10 +0100714int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
715 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100716{
717 return -EINVAL; /* not implemented yet */
718}
719
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -0300720int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
721 struct kvm_mp_state *mp_state)
722{
723 return -EINVAL; /* not implemented yet */
724}
725
/* MP state update: unsupported on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
731
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200732static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
733{
734 /*
735 * We use MMU_RELOAD just to re-arm the ipte notifier for the
736 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
737 * This ensures that the ipte instruction for this request has
738 * already finished. We might race against a second unmapper that
739 * wants to set the blocking bit. Lets just retry the request loop.
740 */
741 while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
742 int rc;
743 rc = gmap_ipte_notify(vcpu->arch.gmap,
744 vcpu->arch.sie_block->prefix,
745 PAGE_SIZE * 2);
746 if (rc)
747 return rc;
748 s390_vcpu_unblock(vcpu);
749 }
750 return 0;
751}
752
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200753static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
754{
755 long rc;
756 hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
757 struct mm_struct *mm = current->mm;
758 down_read(&mm->mmap_sem);
759 rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
760 up_read(&mm->mmap_sem);
761 return rc;
762}
763
Dominik Dingel3c038e62013-10-07 17:11:48 +0200764static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
765 unsigned long token)
766{
767 struct kvm_s390_interrupt inti;
768 inti.parm64 = token;
769
770 if (start_token) {
771 inti.type = KVM_S390_INT_PFAULT_INIT;
772 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
773 } else {
774 inti.type = KVM_S390_INT_PFAULT_DONE;
775 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
776 }
777}
778
/* Async-pf callback: tell the guest a page is not yet present (INIT token). */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
785
/* Async-pf callback: tell the guest a page is now present (DONE token). */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
792
/* Async-pf callback: nothing to do, injection happens in page_present. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
798
/* Always allow "page present" completion so generic cleanup runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
807
/*
 * Try to set up an asynchronous page fault for the address in
 * current->thread.gmap_addr.  Returns 0 when async pfault is not
 * possible/enabled (the caller then falls back to a synchronous
 * fault-in), otherwise the result of kvm_setup_async_pf.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* pfault handshake must be armed by the guest */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* guest PSW must match the configured compare/select mask */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* the guest must be able to take the completion interrupt */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	/* cr0 bit 0x200: service-signal/pfault subclass enabled */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/*
	 * NOTE(review): hva is not checked for an error value before being
	 * handed to kvm_setup_async_pf below — confirm gmap_fault cannot
	 * fail here or whether a check is missing.
	 */
	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
835
Thomas Huth3fb4c402013-09-12 10:33:43 +0200836static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100837{
Thomas Huth3fb4c402013-09-12 10:33:43 +0200838 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +0100839
Dominik Dingel3c038e62013-10-07 17:11:48 +0200840 /*
841 * On s390 notifications for arriving pages will be delivered directly
842 * to the guest but the house keeping for completed pfaults is
843 * handled outside the worker.
844 */
845 kvm_check_async_pf_completion(vcpu);
846
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +0100847 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100848
849 if (need_resched())
850 schedule();
851
Christian Borntraeger71cde582008-05-21 13:37:34 +0200852 if (test_thread_flag(TIF_MCCK_PENDING))
853 s390_handle_mcck();
854
Carsten Otted6b6d162012-01-04 10:25:25 +0100855 if (!kvm_is_ucontrol(vcpu->kvm))
856 kvm_s390_deliver_pending_interrupts(vcpu);
Carsten Otte0ff31862008-05-21 13:37:37 +0200857
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200858 rc = kvm_s390_handle_requests(vcpu);
859 if (rc)
860 return rc;
861
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100862 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +0200863 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
864 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
865 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +0200866
Thomas Huth3fb4c402013-09-12 10:33:43 +0200867 return 0;
868}
869
/*
 * Post-process a SIE exit.  exit_reason is the return value of sie64a:
 * >= 0 means a regular intercept, < 0 a host-side fault.  Returns 0 to
 * continue the run loop, -EREMOTE/-EOPNOTSUPP/negative error to leave it.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;	/* -1 = fault not yet explained */

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* hand the translation fault to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* resolve the guest page fault, async if possible */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		/* unexplained fault: report an addressing exception */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* copy the shadowed gprs 14/15 back to the run area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
913
/*
 * Main vcpu run loop: alternate between pre-run housekeeping, the SIE
 * instruction, and post-run exit handling until an error, a handled
 * exit, or a pending signal ends the loop.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs; re-take it on exit */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
948
/*
 * KVM_RUN ioctl: sync register state from the kvm_run area into the vcpu,
 * run the guest, then sync state back and translate the result for
 * userspace.  Returns 0 when kvm_run describes the exit, -EINTR on signal,
 * or a negative error.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* only exit reasons we know how to resume from are accepted */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	/* sync in PSW and any registers userspace marked dirty */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync current state back out for userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1017
Carsten Otte092670c2011-07-24 10:48:22 +02001018static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001019 unsigned long n, int prefix)
1020{
1021 if (prefix)
1022 return copy_to_guest(vcpu, guestdest, from, n);
1023 else
1024 return copy_to_guest_absolute(vcpu, guestdest, from, n);
1025}
1026
1027/*
1028 * store status at address
 * we have two special cases:
1030 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1031 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1032 */
Thomas Huthe8798922013-11-06 15:46:33 +01001033int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001034{
Carsten Otte092670c2011-07-24 10:48:22 +02001035 unsigned char archmode = 1;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001036 int prefix;
Thomas Huth178bd782013-11-13 20:28:18 +01001037 u64 clkcomp;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001038
1039 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
1040 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
1041 return -EFAULT;
1042 addr = SAVE_AREA_BASE;
1043 prefix = 0;
1044 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
1045 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
1046 return -EFAULT;
1047 addr = SAVE_AREA_BASE;
1048 prefix = 1;
1049 } else
1050 prefix = 0;
1051
Heiko Carstensf64ca212010-02-26 22:37:32 +01001052 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001053 vcpu->arch.guest_fpregs.fprs, 128, prefix))
1054 return -EFAULT;
1055
Heiko Carstensf64ca212010-02-26 22:37:32 +01001056 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001057 vcpu->run->s.regs.gprs, 128, prefix))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001058 return -EFAULT;
1059
Heiko Carstensf64ca212010-02-26 22:37:32 +01001060 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001061 &vcpu->arch.sie_block->gpsw, 16, prefix))
1062 return -EFAULT;
1063
Heiko Carstensf64ca212010-02-26 22:37:32 +01001064 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001065 &vcpu->arch.sie_block->prefix, 4, prefix))
1066 return -EFAULT;
1067
1068 if (__guestcopy(vcpu,
Heiko Carstensf64ca212010-02-26 22:37:32 +01001069 addr + offsetof(struct save_area, fp_ctrl_reg),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001070 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
1071 return -EFAULT;
1072
Heiko Carstensf64ca212010-02-26 22:37:32 +01001073 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001074 &vcpu->arch.sie_block->todpr, 4, prefix))
1075 return -EFAULT;
1076
Heiko Carstensf64ca212010-02-26 22:37:32 +01001077 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001078 &vcpu->arch.sie_block->cputm, 8, prefix))
1079 return -EFAULT;
1080
Thomas Huth178bd782013-11-13 20:28:18 +01001081 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Heiko Carstensf64ca212010-02-26 22:37:32 +01001082 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
Thomas Huth178bd782013-11-13 20:28:18 +01001083 &clkcomp, 8, prefix))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001084 return -EFAULT;
1085
Heiko Carstensf64ca212010-02-26 22:37:32 +01001086 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
Christian Borntraeger59674c12012-01-11 11:20:33 +01001087 &vcpu->run->s.regs.acrs, 64, prefix))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001088 return -EFAULT;
1089
1090 if (__guestcopy(vcpu,
Heiko Carstensf64ca212010-02-26 22:37:32 +01001091 addr + offsetof(struct save_area, ctrl_regs),
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001092 &vcpu->arch.sie_block->gcr, 128, prefix))
1093 return -EFAULT;
1094 return 0;
1095}
1096
Thomas Huthe8798922013-11-06 15:46:33 +01001097int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1098{
1099 /*
1100 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1101 * copying in vcpu load/put. Lets update our copies before we save
1102 * it into the save area
1103 */
1104 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1105 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1106 save_access_regs(vcpu->run->s.regs.acrs);
1107
1108 return kvm_s390_store_status_unloaded(vcpu, addr);
1109}
1110
Cornelia Huckd6712df2012-12-20 15:32:11 +01001111static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1112 struct kvm_enable_cap *cap)
1113{
1114 int r;
1115
1116 if (cap->flags)
1117 return -EINVAL;
1118
1119 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001120 case KVM_CAP_S390_CSS_SUPPORT:
1121 if (!vcpu->kvm->arch.css_support) {
1122 vcpu->kvm->arch.css_support = 1;
1123 trace_kvm_s390_enable_css(vcpu->kvm);
1124 }
1125 r = 0;
1126 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001127 default:
1128 r = -EINVAL;
1129 break;
1130 }
1131 return r;
1132}
1133
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001134long kvm_arch_vcpu_ioctl(struct file *filp,
1135 unsigned int ioctl, unsigned long arg)
1136{
1137 struct kvm_vcpu *vcpu = filp->private_data;
1138 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02001139 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03001140 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001141
Avi Kivity93736622010-05-13 12:35:17 +03001142 switch (ioctl) {
1143 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001144 struct kvm_s390_interrupt s390int;
1145
Avi Kivity93736622010-05-13 12:35:17 +03001146 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001147 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03001148 break;
1149 r = kvm_s390_inject_vcpu(vcpu, &s390int);
1150 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001151 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001152 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02001153 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001154 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02001155 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03001156 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001157 case KVM_S390_SET_INITIAL_PSW: {
1158 psw_t psw;
1159
Avi Kivitybc923cc2010-05-13 12:21:46 +03001160 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001161 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03001162 break;
1163 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1164 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001165 }
1166 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03001167 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1168 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001169 case KVM_SET_ONE_REG:
1170 case KVM_GET_ONE_REG: {
1171 struct kvm_one_reg reg;
1172 r = -EFAULT;
1173 if (copy_from_user(&reg, argp, sizeof(reg)))
1174 break;
1175 if (ioctl == KVM_SET_ONE_REG)
1176 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1177 else
1178 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1179 break;
1180 }
Carsten Otte27e03932012-01-04 10:25:21 +01001181#ifdef CONFIG_KVM_S390_UCONTROL
1182 case KVM_S390_UCAS_MAP: {
1183 struct kvm_s390_ucas_mapping ucasmap;
1184
1185 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1186 r = -EFAULT;
1187 break;
1188 }
1189
1190 if (!kvm_is_ucontrol(vcpu->kvm)) {
1191 r = -EINVAL;
1192 break;
1193 }
1194
1195 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1196 ucasmap.vcpu_addr, ucasmap.length);
1197 break;
1198 }
1199 case KVM_S390_UCAS_UNMAP: {
1200 struct kvm_s390_ucas_mapping ucasmap;
1201
1202 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1203 r = -EFAULT;
1204 break;
1205 }
1206
1207 if (!kvm_is_ucontrol(vcpu->kvm)) {
1208 r = -EINVAL;
1209 break;
1210 }
1211
1212 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1213 ucasmap.length);
1214 break;
1215 }
1216#endif
Carsten Otteccc79102012-01-04 10:25:26 +01001217 case KVM_S390_VCPU_FAULT: {
1218 r = gmap_fault(arg, vcpu->arch.gmap);
1219 if (!IS_ERR_VALUE(r))
1220 r = 0;
1221 break;
1222 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01001223 case KVM_ENABLE_CAP:
1224 {
1225 struct kvm_enable_cap cap;
1226 r = -EFAULT;
1227 if (copy_from_user(&cap, argp, sizeof(cap)))
1228 break;
1229 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1230 break;
1231 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001232 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01001233 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001234 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03001235 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001236}
1237
Carsten Otte5b1c1492012-01-04 10:25:23 +01001238int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1239{
1240#ifdef CONFIG_KVM_S390_UCONTROL
1241 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1242 && (kvm_is_ucontrol(vcpu->kvm))) {
1243 vmf->page = virt_to_page(vcpu->arch.sie_block);
1244 get_page(vmf->page);
1245 return 0;
1246 }
1247#endif
1248 return VM_FAULT_SIGBUS;
1249}
1250
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301251void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09001252 struct kvm_memory_slot *dont)
1253{
1254}
1255
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301256int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1257 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09001258{
1259 return 0;
1260}
1261
Takuya Yoshikawae59dbe02013-07-04 13:40:29 +09001262void kvm_arch_memslots_updated(struct kvm *kvm)
1263{
1264}
1265
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001266/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001267int kvm_arch_prepare_memory_region(struct kvm *kvm,
1268 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001269 struct kvm_userspace_memory_region *mem,
1270 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001271{
Nick Wangdd2887e2013-03-25 17:22:57 +01001272 /* A few sanity checks. We can have memory slots which have to be
1273 located/ended at a segment boundary (1MB). The memory in userland is
1274 ok to be fragmented into various different vmas. It is okay to mmap()
1275 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001276
Carsten Otte598841c2011-07-24 10:48:21 +02001277 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001278 return -EINVAL;
1279
Carsten Otte598841c2011-07-24 10:48:21 +02001280 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001281 return -EINVAL;
1282
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001283 return 0;
1284}
1285
/*
 * Commit a memslot change by (re-)mapping the region into the guest
 * address space via gmap.  Failures are only logged; this hook cannot
 * return an error.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
1310
Marcelo Tosatti2df72e92012-08-24 15:54:57 -03001311void kvm_arch_flush_shadow_all(struct kvm *kvm)
1312{
1313}
1314
/* No shadow page tables on s390; nothing to flush per memslot. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1319
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001320static int __init kvm_s390_init(void)
1321{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001322 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03001323 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001324 if (ret)
1325 return ret;
1326
1327 /*
1328 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001329 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001330 * only set facilities that are known to work in KVM.
1331 */
Michael Mueller78c4b592013-07-26 15:04:04 +02001332 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1333 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001334 kvm_exit();
1335 return -ENOMEM;
1336 }
Michael Mueller78c4b592013-07-26 15:04:04 +02001337 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Thomas Huthd208c792013-12-12 13:40:40 +01001338 vfacilities[0] &= 0xff82fff3f4fc2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001339 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001340 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001341}
1342
/* Module exit: release the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1348
1349module_init(kvm_s390_init);
1350module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02001351
1352/*
1353 * Enable autoloading of the kvm module.
1354 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1355 * since x86 takes a different approach.
1356 */
1357#include <linux/miscdevice.h>
1358MODULE_ALIAS_MISCDEV(KVM_MINOR);
1359MODULE_ALIAS("devname:kvm");