blob: b767ec97368a9bb1743451bad2b66b2b111b67a6 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010014 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020019#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010020#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010025#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010026#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010027#include <asm/lowcore.h>
28#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010029#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010030#include <asm/switch_to.h>
Michael Mueller78c4b59f2013-07-26 15:04:04 +020031#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020032#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010033#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010034#include "gaccess.h"
35
Cornelia Huck5786fff2012-07-23 17:20:29 +020036#define CREATE_TRACE_POINTS
37#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020038#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020039
Heiko Carstensb0c632d2008-03-25 18:47:20 +010040#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
41
42struct kvm_stats_debugfs_item debugfs_entries[] = {
43 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020044 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010045 { "exit_validity", VCPU_STAT(exit_validity) },
46 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
47 { "exit_external_request", VCPU_STAT(exit_external_request) },
48 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010049 { "exit_instruction", VCPU_STAT(exit_instruction) },
50 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
51 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020052 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010053 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
54 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020055 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010056 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
57 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
58 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
59 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
60 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
61 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
62 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020063 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010064 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
65 { "instruction_spx", VCPU_STAT(instruction_spx) },
66 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
67 { "instruction_stap", VCPU_STAT(instruction_stap) },
68 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
69 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
70 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020071 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010072 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
73 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020074 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010075 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010076 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020077 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010078 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
79 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
80 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
81 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
82 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010083 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010084 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020085 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010086 { NULL }
87};
88
Michael Mueller78c4b59f2013-07-26 15:04:04 +020089unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020090static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010091
Michael Mueller78c4b59f2013-07-26 15:04:04 +020092/* test availability of vfacility */
93static inline int test_vfacility(unsigned long nr)
94{
95 return __test_facility(nr, (void *) vfacilities);
96}
97
Heiko Carstensb0c632d2008-03-25 18:47:20 +010098/* Section: not file related */
Alexander Graf10474ae2009-09-15 11:37:46 +020099int kvm_arch_hardware_enable(void *garbage)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100100{
101 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200102 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100103}
104
105void kvm_arch_hardware_disable(void *garbage)
106{
107}
108
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200109static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
110
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100111int kvm_arch_hardware_setup(void)
112{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200113 gmap_notifier.notifier_call = kvm_gmap_notifier;
114 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115 return 0;
116}
117
118void kvm_arch_hardware_unsetup(void)
119{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200120 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100121}
122
123void kvm_arch_check_processor_compat(void *rtn)
124{
125}
126
127int kvm_arch_init(void *opaque)
128{
129 return 0;
130}
131
132void kvm_arch_exit(void)
133{
134}
135
136/* Section: device related */
137long kvm_arch_dev_ioctl(struct file *filp,
138 unsigned int ioctl, unsigned long arg)
139{
140 if (ioctl == KVM_S390_ENABLE_SIE)
141 return s390_enable_sie();
142 return -EINVAL;
143}
144
145int kvm_dev_ioctl_check_extension(long ext)
146{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100147 int r;
148
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200149 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100150 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200151 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100152 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100153#ifdef CONFIG_KVM_S390_UCONTROL
154 case KVM_CAP_S390_UCONTROL:
155#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200156 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100157 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200158 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100159 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100160 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100161 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200162 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200163 case KVM_CAP_ENABLE_CAP_VM:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100164 r = 1;
165 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200166 case KVM_CAP_NR_VCPUS:
167 case KVM_CAP_MAX_VCPUS:
168 r = KVM_MAX_VCPUS;
169 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100170 case KVM_CAP_NR_MEMSLOTS:
171 r = KVM_USER_MEM_SLOTS;
172 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200173 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100174 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200175 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200176 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100177 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200178 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100179 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100180}
181
182/* Section: vm related */
183/*
184 * Get (and clear) the dirty memory log for a memory slot.
185 */
186int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
187 struct kvm_dirty_log *log)
188{
189 return 0;
190}
191
Cornelia Huckd938dc52013-10-23 18:26:34 +0200192static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
193{
194 int r;
195
196 if (cap->flags)
197 return -EINVAL;
198
199 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200200 case KVM_CAP_S390_IRQCHIP:
201 kvm->arch.use_irqchip = 1;
202 r = 0;
203 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200204 default:
205 r = -EINVAL;
206 break;
207 }
208 return r;
209}
210
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100211long kvm_arch_vm_ioctl(struct file *filp,
212 unsigned int ioctl, unsigned long arg)
213{
214 struct kvm *kvm = filp->private_data;
215 void __user *argp = (void __user *)arg;
216 int r;
217
218 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100219 case KVM_S390_INTERRUPT: {
220 struct kvm_s390_interrupt s390int;
221
222 r = -EFAULT;
223 if (copy_from_user(&s390int, argp, sizeof(s390int)))
224 break;
225 r = kvm_s390_inject_vm(kvm, &s390int);
226 break;
227 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200228 case KVM_ENABLE_CAP: {
229 struct kvm_enable_cap cap;
230 r = -EFAULT;
231 if (copy_from_user(&cap, argp, sizeof(cap)))
232 break;
233 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
234 break;
235 }
Cornelia Huck84223592013-07-15 13:36:01 +0200236 case KVM_CREATE_IRQCHIP: {
237 struct kvm_irq_routing_entry routing;
238
239 r = -EINVAL;
240 if (kvm->arch.use_irqchip) {
241 /* Set up dummy routing. */
242 memset(&routing, 0, sizeof(routing));
243 kvm_set_irq_routing(kvm, &routing, 0, 0);
244 r = 0;
245 }
246 break;
247 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100248 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300249 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100250 }
251
252 return r;
253}
254
Carsten Ottee08b9632012-01-04 10:25:20 +0100255int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100256{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100257 int rc;
258 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100259 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100260
Carsten Ottee08b9632012-01-04 10:25:20 +0100261 rc = -EINVAL;
262#ifdef CONFIG_KVM_S390_UCONTROL
263 if (type & ~KVM_VM_S390_UCONTROL)
264 goto out_err;
265 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
266 goto out_err;
267#else
268 if (type)
269 goto out_err;
270#endif
271
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100272 rc = s390_enable_sie();
273 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100274 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100275
Carsten Otteb2904112011-10-18 12:27:13 +0200276 rc = -ENOMEM;
277
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100278 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
279 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100280 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100281 spin_lock(&kvm_lock);
282 sca_offset = (sca_offset + 16) & 0x7f0;
283 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
284 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100285
286 sprintf(debug_name, "kvm-%u", current->pid);
287
288 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
289 if (!kvm->arch.dbf)
290 goto out_nodbf;
291
Carsten Otteba5c1e92008-03-25 18:47:26 +0100292 spin_lock_init(&kvm->arch.float_int.lock);
293 INIT_LIST_HEAD(&kvm->arch.float_int.list);
294
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100295 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
296 VM_EVENT(kvm, 3, "%s", "vm created");
297
Carsten Ottee08b9632012-01-04 10:25:20 +0100298 if (type & KVM_VM_S390_UCONTROL) {
299 kvm->arch.gmap = NULL;
300 } else {
301 kvm->arch.gmap = gmap_alloc(current->mm);
302 if (!kvm->arch.gmap)
303 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200304 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200305 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100306 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100307
308 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200309 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100310
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100311 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200312out_nogmap:
313 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100314out_nodbf:
315 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100316out_err:
317 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100318}
319
Christian Borntraegerd329c032008-11-26 14:50:27 +0100320void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
321{
322 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200323 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200324 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100325 if (!kvm_is_ucontrol(vcpu->kvm)) {
326 clear_bit(63 - vcpu->vcpu_id,
327 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
328 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
329 (__u64) vcpu->arch.sie_block)
330 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
331 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200332 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100333
334 if (kvm_is_ucontrol(vcpu->kvm))
335 gmap_free(vcpu->arch.gmap);
336
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200337 if (vcpu->arch.sie_block->cbrlo)
338 __free_page(__pfn_to_page(
339 vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100340 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200341
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100342 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200343 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100344}
345
346static void kvm_free_vcpus(struct kvm *kvm)
347{
348 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300349 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100350
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300351 kvm_for_each_vcpu(i, vcpu, kvm)
352 kvm_arch_vcpu_destroy(vcpu);
353
354 mutex_lock(&kvm->lock);
355 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
356 kvm->vcpus[i] = NULL;
357
358 atomic_set(&kvm->online_vcpus, 0);
359 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100360}
361
Sheng Yangad8ba2c2009-01-06 10:03:02 +0800362void kvm_arch_sync_events(struct kvm *kvm)
363{
364}
365
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100366void kvm_arch_destroy_vm(struct kvm *kvm)
367{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100368 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100369 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100370 debug_unregister(kvm->arch.dbf);
Carsten Otte27e03932012-01-04 10:25:21 +0100371 if (!kvm_is_ucontrol(kvm))
372 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200373 kvm_s390_destroy_adapters(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100374}
375
376/* Section: vcpu related */
377int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
378{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200379 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
380 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100381 if (kvm_is_ucontrol(vcpu->kvm)) {
382 vcpu->arch.gmap = gmap_alloc(current->mm);
383 if (!vcpu->arch.gmap)
384 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200385 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100386 return 0;
387 }
388
Carsten Otte598841c2011-07-24 10:48:21 +0200389 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100390 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
391 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100392 KVM_SYNC_ACRS |
393 KVM_SYNC_CRS;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100394 return 0;
395}
396
397void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
398{
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100399 /* Nothing todo */
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100400}
401
402void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
403{
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200404 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
405 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100406 save_access_regs(vcpu->arch.host_acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200407 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
408 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100409 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200410 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100411 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100412}
413
414void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
415{
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100416 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200417 gmap_disable(vcpu->arch.gmap);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200418 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
419 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100420 save_access_regs(vcpu->run->s.regs.acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200421 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
422 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100423 restore_access_regs(vcpu->arch.host_acrs);
424}
425
426static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
427{
428 /* this equals initial cpu reset in pop, but we don't switch to ESA */
429 vcpu->arch.sie_block->gpsw.mask = 0UL;
430 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +0100431 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100432 vcpu->arch.sie_block->cputm = 0UL;
433 vcpu->arch.sie_block->ckc = 0UL;
434 vcpu->arch.sie_block->todpr = 0;
435 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
436 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
437 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
438 vcpu->arch.guest_fpregs.fpc = 0;
439 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
440 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100441 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +0200442 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
443 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger61bde822012-06-11 16:06:57 +0200444 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
Jens Freimann2ed10cc2014-02-11 13:48:07 +0100445 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100446}
447
Marcelo Tosatti42897d82012-11-27 23:29:02 -0200448int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
449{
450 return 0;
451}
452
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100453int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
454{
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200455 struct page *cbrl;
456
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100457 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
458 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200459 CPUSTAT_STOPPED |
460 CPUSTAT_GED);
Christian Borntraegerfc345312010-06-17 23:16:20 +0200461 vcpu->arch.sie_block->ecb = 6;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200462 if (test_vfacility(50) && test_vfacility(73))
463 vcpu->arch.sie_block->ecb |= 0x10;
464
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200465 vcpu->arch.sie_block->ecb2 = 8;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100466 vcpu->arch.sie_block->eca = 0xC1002001U;
Michael Mueller78c4b59f2013-07-26 15:04:04 +0200467 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
Dominik Dingel693ffc02014-01-14 18:11:14 +0100468 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200469 if (kvm_enabled_cmma()) {
470 cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO);
471 if (cbrl) {
472 vcpu->arch.sie_block->ecb2 |= 0x80;
473 vcpu->arch.sie_block->ecb2 &= ~0x08;
474 vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl);
475 }
476 }
Christian Borntraegerca872302009-05-12 17:21:49 +0200477 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
478 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
479 (unsigned long) vcpu);
480 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Christian Borntraeger453423d2008-03-25 18:47:29 +0100481 get_cpu_id(&vcpu->arch.cpu_id);
Christian Borntraeger92e6ecf2009-03-26 15:23:58 +0100482 vcpu->arch.cpu_id.version = 0xff;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100483 return 0;
484}
485
486struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
487 unsigned int id)
488{
Carsten Otte4d475552011-10-18 12:27:12 +0200489 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200490 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200491 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100492
Carsten Otte4d475552011-10-18 12:27:12 +0200493 if (id >= KVM_MAX_VCPUS)
494 goto out;
495
496 rc = -ENOMEM;
497
Michael Muellerb110fea2013-06-12 13:54:54 +0200498 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100499 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200500 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100501
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200502 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
503 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100504 goto out_free_cpu;
505
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200506 vcpu->arch.sie_block = &sie_page->sie_block;
507 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
508
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100509 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100510 if (!kvm_is_ucontrol(kvm)) {
511 if (!kvm->arch.sca) {
512 WARN_ON_ONCE(1);
513 goto out_free_cpu;
514 }
515 if (!kvm->arch.sca->cpu[id].sda)
516 kvm->arch.sca->cpu[id].sda =
517 (__u64) vcpu->arch.sie_block;
518 vcpu->arch.sie_block->scaoh =
519 (__u32)(((__u64)kvm->arch.sca) >> 32);
520 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
521 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
522 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100523
Carsten Otteba5c1e92008-03-25 18:47:26 +0100524 spin_lock_init(&vcpu->arch.local_int.lock);
525 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
526 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200527 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100528 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100529
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100530 rc = kvm_vcpu_init(vcpu, kvm, id);
531 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800532 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100533 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
534 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200535 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100536
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100537 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800538out_free_sie_block:
539 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100540out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200541 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200542out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100543 return ERR_PTR(rc);
544}
545
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100546int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
547{
Michael Muellerf87618e2014-02-26 16:14:17 +0100548 return kvm_cpu_has_interrupt(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100549}
550
Christian Borntraeger49b99e12013-05-17 14:41:35 +0200551void s390_vcpu_block(struct kvm_vcpu *vcpu)
552{
553 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
554}
555
556void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
557{
558 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
559}
560
561/*
562 * Kick a guest cpu out of SIE and wait until SIE is not running.
563 * If the CPU is not running (e.g. waiting as idle) the function will
564 * return immediately. */
565void exit_sie(struct kvm_vcpu *vcpu)
566{
567 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
568 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
569 cpu_relax();
570}
571
572/* Kick a guest cpu out of SIE and prevent SIE-reentry */
573void exit_sie_sync(struct kvm_vcpu *vcpu)
574{
575 s390_vcpu_block(vcpu);
576 exit_sie(vcpu);
577}
578
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200579static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
580{
581 int i;
582 struct kvm *kvm = gmap->private;
583 struct kvm_vcpu *vcpu;
584
585 kvm_for_each_vcpu(i, vcpu, kvm) {
586 /* match against both prefix pages */
587 if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
588 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
589 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
590 exit_sie_sync(vcpu);
591 }
592 }
593}
594
Christoffer Dallb6d33832012-03-08 16:44:24 -0500595int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
596{
597 /* kvm common code refers to this, but never calls it */
598 BUG();
599 return 0;
600}
601
Carsten Otte14eebd92012-05-15 14:15:26 +0200602static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
603 struct kvm_one_reg *reg)
604{
605 int r = -EINVAL;
606
607 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +0200608 case KVM_REG_S390_TODPR:
609 r = put_user(vcpu->arch.sie_block->todpr,
610 (u32 __user *)reg->addr);
611 break;
612 case KVM_REG_S390_EPOCHDIFF:
613 r = put_user(vcpu->arch.sie_block->epoch,
614 (u64 __user *)reg->addr);
615 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +0200616 case KVM_REG_S390_CPU_TIMER:
617 r = put_user(vcpu->arch.sie_block->cputm,
618 (u64 __user *)reg->addr);
619 break;
620 case KVM_REG_S390_CLOCK_COMP:
621 r = put_user(vcpu->arch.sie_block->ckc,
622 (u64 __user *)reg->addr);
623 break;
Dominik Dingel536336c2013-09-30 10:55:33 +0200624 case KVM_REG_S390_PFTOKEN:
625 r = put_user(vcpu->arch.pfault_token,
626 (u64 __user *)reg->addr);
627 break;
628 case KVM_REG_S390_PFCOMPARE:
629 r = put_user(vcpu->arch.pfault_compare,
630 (u64 __user *)reg->addr);
631 break;
632 case KVM_REG_S390_PFSELECT:
633 r = put_user(vcpu->arch.pfault_select,
634 (u64 __user *)reg->addr);
635 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100636 case KVM_REG_S390_PP:
637 r = put_user(vcpu->arch.sie_block->pp,
638 (u64 __user *)reg->addr);
639 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +0100640 case KVM_REG_S390_GBEA:
641 r = put_user(vcpu->arch.sie_block->gbea,
642 (u64 __user *)reg->addr);
643 break;
Carsten Otte14eebd92012-05-15 14:15:26 +0200644 default:
645 break;
646 }
647
648 return r;
649}
650
651static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
652 struct kvm_one_reg *reg)
653{
654 int r = -EINVAL;
655
656 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +0200657 case KVM_REG_S390_TODPR:
658 r = get_user(vcpu->arch.sie_block->todpr,
659 (u32 __user *)reg->addr);
660 break;
661 case KVM_REG_S390_EPOCHDIFF:
662 r = get_user(vcpu->arch.sie_block->epoch,
663 (u64 __user *)reg->addr);
664 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +0200665 case KVM_REG_S390_CPU_TIMER:
666 r = get_user(vcpu->arch.sie_block->cputm,
667 (u64 __user *)reg->addr);
668 break;
669 case KVM_REG_S390_CLOCK_COMP:
670 r = get_user(vcpu->arch.sie_block->ckc,
671 (u64 __user *)reg->addr);
672 break;
Dominik Dingel536336c2013-09-30 10:55:33 +0200673 case KVM_REG_S390_PFTOKEN:
674 r = get_user(vcpu->arch.pfault_token,
675 (u64 __user *)reg->addr);
676 break;
677 case KVM_REG_S390_PFCOMPARE:
678 r = get_user(vcpu->arch.pfault_compare,
679 (u64 __user *)reg->addr);
680 break;
681 case KVM_REG_S390_PFSELECT:
682 r = get_user(vcpu->arch.pfault_select,
683 (u64 __user *)reg->addr);
684 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100685 case KVM_REG_S390_PP:
686 r = get_user(vcpu->arch.sie_block->pp,
687 (u64 __user *)reg->addr);
688 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +0100689 case KVM_REG_S390_GBEA:
690 r = get_user(vcpu->arch.sie_block->gbea,
691 (u64 __user *)reg->addr);
692 break;
Carsten Otte14eebd92012-05-15 14:15:26 +0200693 default:
694 break;
695 }
696
697 return r;
698}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500699
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100700static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
701{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100702 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100703 return 0;
704}
705
706int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
707{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +0100708 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100709 return 0;
710}
711
/* Copy all general purpose registers from the vcpu's run area to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
717
/*
 * Set the special registers: access registers (acrs) and control
 * registers (crs).
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	/* Load the new acrs into the hardware registers as well, since the
	 * guest access registers are kept there while the vcpu is loaded. */
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
726
/* Read back the special registers (access and control registers). */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
734
/*
 * Set the guest floating point state (fpc + 16 fprs).
 * Returns -EINVAL for an invalid floating point control word.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* Validate the fpc before it can reach the hardware below. */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* Guest fp state is kept in the host registers while the vcpu is
	 * loaded, so propagate the new values there too. */
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
745
/* Read back the guest floating point state (fpc + 16 fprs). */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
752
753static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
754{
755 int rc = 0;
756
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100757 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100758 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100759 else {
760 vcpu->run->psw_mask = psw.mask;
761 vcpu->run->psw_addr = psw.addr;
762 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100763 return rc;
764}
765
/* KVM_TRANSLATE ioctl: address translation is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
771
/* KVM_SET_GUEST_DEBUG ioctl: guest debugging is not supported on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
777
/* KVM_GET_MP_STATE ioctl: multiprocessing state is not exposed on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
783
/* KVM_SET_MP_STATE ioctl: multiprocessing state is not exposed on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
789
/*
 * Process pending vcpu requests before (re)entering SIE.
 * Returns 0 on success or a negative error code from gmap_ipte_notify.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		/* Re-arm the notifier for both pages of the prefix area. */
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}
810
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200811static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
812{
813 long rc;
814 hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
815 struct mm_struct *mm = current->mm;
816 down_read(&mm->mmap_sem);
817 rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
818 up_read(&mm->mmap_sem);
819 return rc;
820}
821
Dominik Dingel3c038e62013-10-07 17:11:48 +0200822static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
823 unsigned long token)
824{
825 struct kvm_s390_interrupt inti;
826 inti.parm64 = token;
827
828 if (start_token) {
829 inti.type = KVM_S390_INT_PFAULT_INIT;
830 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
831 } else {
832 inti.type = KVM_S390_INT_PFAULT_DONE;
833 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
834 }
835}
836
/* Async-pf callback: the page is not present yet, tell the guest via
 * a PFAULT_INIT interrupt carrying the token. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
843
/* Async-pf callback: the page is now available, complete the pfault
 * with a PFAULT_DONE interrupt carrying the same token. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
850
/* Async-pf callback, intentionally empty on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
856
/* Always allow "page present" notification so the generic async-pf
 * code runs its completion housekeeping. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
865
/*
 * Try to arm an asynchronous page fault for the current gmap fault.
 * Returns non-zero if an async pfault was set up, 0 if the fault must
 * be handled synchronously instead.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	/* Guest has not enabled the pfault mechanism. */
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* The guest-selected PSW mask bits must match the compare value. */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	/* The guest must be able to take the PFAULT_INIT ext interrupt. */
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	/* CR0 bit 0x200: service signal / pfault subclass mask. */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* NOTE(review): hva is not checked for an error value here before
	 * being passed on — presumably kvm_setup_async_pf copes; confirm. */
	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
893
/*
 * Prepare the vcpu for (re)entering SIE: deliver pending work, sync
 * registers and handle vcpu requests. Returns 0 when SIE may be
 * entered, or a negative error code.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* Shadow gprs 14/15 into the sie block (gg14/gg15). */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	/* Handle a pending machine check before entering the guest. */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	/* Clear the intercept code from the previous exit. */
	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
927
/*
 * Post-process a SIE exit. @exit_reason is the return value of sie64a:
 * >= 0 means a regular intercept, < 0 means a host fault occurred.
 * Returns 0 to re-enter the guest, a negative error or -EREMOTE /
 * -EOPNOTSUPP to drop back to userspace.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	/* -1 marks "not handled yet"; resolved to a real code below. */
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol guests: report the translation fault to user. */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* Host page fault in guest memory: try async pfault first,
		 * otherwise fault the page in synchronously. */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		/* Unresolvable fault while in SIE: give the guest an
		 * addressing exception. */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* Copy the shadowed gg14/gg15 back to gprs 14/15. */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
971
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200972bool kvm_enabled_cmma(void)
973{
974 if (!MACHINE_IS_LPAR)
975 return false;
976 /* only enable for z10 and later */
977 if (!MACHINE_HAS_EDAT1)
978 return false;
979 return true;
980}
981
/*
 * Main vcpu run loop: repeatedly enter SIE and process exits until
 * an error occurs, userspace interaction is required, or a signal is
 * pending. Returns the final rc from vcpu_pre_run/vcpu_post_run.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* Drop srcu while the guest runs; re-acquire afterwards. */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1016
/*
 * KVM_RUN ioctl: sync state from kvm_run, run the guest, and sync
 * state back. Returns 0 on a normal exit to userspace, -EINTR when
 * interrupted by a signal, or a negative error code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* Sanity check: only exit reasons we produce may be re-entered. */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	/* Transfer PSW and dirty synced registers from userspace. */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* Sync state back to kvm_run for userspace. */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1085
Carsten Otte092670c2011-07-24 10:48:22 +02001086static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001087 unsigned long n, int prefix)
1088{
1089 if (prefix)
1090 return copy_to_guest(vcpu, guestdest, from, n);
1091 else
1092 return copy_to_guest_absolute(vcpu, guestdest, from, n);
1093}
1094
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * Returns 0 on success, -EFAULT if any guest copy fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* architected mode byte at absolute address 163 */
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/* Store all registers into the save_area layout at addr. */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	/* The clock comparator is stored shifted right by 8 bits. */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
1164
/* Store status for a loaded vcpu: refresh the lazily-kept register
 * copies first, then write them to the guest save area at @addr. */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1178
Cornelia Huckd6712df2012-12-20 15:32:11 +01001179static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1180 struct kvm_enable_cap *cap)
1181{
1182 int r;
1183
1184 if (cap->flags)
1185 return -EINVAL;
1186
1187 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001188 case KVM_CAP_S390_CSS_SUPPORT:
1189 if (!vcpu->kvm->arch.css_support) {
1190 vcpu->kvm->arch.css_support = 1;
1191 trace_kvm_s390_enable_css(vcpu->kvm);
1192 }
1193 r = 0;
1194 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001195 default:
1196 r = -EINVAL;
1197 break;
1198 }
1199 return r;
1200}
1201
/*
 * Dispatcher for the s390-specific vcpu ioctls. Returns the ioctl
 * result, or -ENOTTY for unknown ioctl numbers.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* Hold srcu: store status accesses guest memory (memslots). */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mappings only make sense for ucontrol guests. */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* Resolve a guest address; success is mapped to 0. */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1305
/*
 * mmap fault handler for the vcpu fd. Only ucontrol guests may map
 * the sie control block page; everything else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
1318
/* No arch-private memslot data on s390, so nothing to free. */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
1323
/* No arch-private memslot data on s390, so nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1329
/* Nothing to do on s390 when the memslot array generation changes. */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
1333
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001334/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001335int kvm_arch_prepare_memory_region(struct kvm *kvm,
1336 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001337 struct kvm_userspace_memory_region *mem,
1338 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001339{
Nick Wangdd2887e2013-03-25 17:22:57 +01001340 /* A few sanity checks. We can have memory slots which have to be
1341 located/ended at a segment boundary (1MB). The memory in userland is
1342 ok to be fragmented into various different vmas. It is okay to mmap()
1343 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001344
Carsten Otte598841c2011-07-24 10:48:21 +02001345 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001346 return -EINVAL;
1347
Carsten Otte598841c2011-07-24 10:48:21 +02001348 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001349 return -EINVAL;
1350
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001351 return 0;
1352}
1353
/* Apply a committed memslot change to the guest address space (gmap). */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
1378
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
1382
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1387
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001388static int __init kvm_s390_init(void)
1389{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001390 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03001391 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001392 if (ret)
1393 return ret;
1394
1395 /*
1396 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001397 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001398 * only set facilities that are known to work in KVM.
1399 */
Michael Mueller78c4b59f2013-07-26 15:04:04 +02001400 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1401 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001402 kvm_exit();
1403 return -ENOMEM;
1404 }
Michael Mueller78c4b59f2013-07-26 15:04:04 +02001405 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Thomas Huthd208c792013-12-12 13:40:40 +01001406 vfacilities[0] &= 0xff82fff3f4fc2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001407 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001408 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001409}
1410
/* Module exit: free the facility page and unregister from KVM core. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1416
/* Module entry/exit point registration. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");