blob: a0907795f31db30d8d5a45682f4fad5977c55c6c [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010029#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010030#include <asm/lowcore.h>
Fan Zhangfdf03652015-05-13 10:58:41 +020031#include <asm/etr.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010032#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010033#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010034#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020035#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020036#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010037#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010038#include "gaccess.h"
39
David Hildenbrandea2cdd22015-05-20 13:24:02 +020040#define KMSG_COMPONENT "kvm-s390"
41#undef pr_fmt
42#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
Cornelia Huck5786fff2012-07-23 17:20:29 +020044#define CREATE_TRACE_POINTS
45#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020046#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020047
Thomas Huth41408c282015-02-06 15:01:21 +010048#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010049#define LOCAL_IRQS 32
50#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010052
Heiko Carstensb0c632d2008-03-25 18:47:20 +010053#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
/*
 * Per-VCPU statistics counters exported through debugfs.  Each entry maps
 * a debugfs file name to the offset of a counter in struct kvm_vcpu
 * (via the VCPU_STAT() helper above).  The list is terminated by { NULL }.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
118
/* upper facilities limit for kvm */
/*
 * Each element covers 64 facility bits (see kvm_s390_fac_list_mask_size);
 * only facilities whose bit is set here may be made visible to guests.
 */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100124
/*
 * Number of 64-bit elements in kvm_s390_fac_list_mask[].  The mask must
 * never be larger than the architectural facility-mask size, which is
 * enforced at compile time.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
130
Michael Mueller9d8d5782015-02-02 15:42:51 +0100131static struct gmap_notifier gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200132debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100133
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100134/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/*
	 * Nothing to switch on: every s390 machine is virtualization
	 * enabled ;-), so just report success.
	 */
	return 0;
}
140
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200141static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
142
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	/*
	 * Shift the epoch of every VM and every VCPU's SIE control block
	 * by the host TOD delta, so guest-visible time does not jump.
	 */
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

/* Hooked into s390_epoch_delta_notifier in kvm_arch_hardware_setup(). */
static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
169
int kvm_arch_hardware_setup(void)
{
	/* get notified when a guest page table entry (pte) is invalidated */
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	/* keep guest epochs in sync when the host TOD clock changes */
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}
178
void kvm_arch_hardware_unsetup(void)
{
	/* mirror of kvm_arch_hardware_setup(): drop both notifiers */
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
185
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100186int kvm_arch_init(void *opaque)
187{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200188 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
189 if (!kvm_s390_dbf)
190 return -ENOMEM;
191
192 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
193 debug_unregister(kvm_s390_dbf);
194 return -ENOMEM;
195 }
196
Cornelia Huck84877d92014-09-02 10:27:35 +0100197 /* Register floating interrupt controller interface. */
198 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100199}
200
void kvm_arch_exit(void)
{
	/* tear down the debug area created in kvm_arch_init() */
	debug_unregister(kvm_s390_dbf);
}
205
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100206/* Section: device related */
207long kvm_arch_dev_ioctl(struct file *filp,
208 unsigned int ioctl, unsigned long arg)
209{
210 if (ioctl == KVM_S390_ENABLE_SIE)
211 return s390_enable_sie();
212 return -EINVAL;
213}
214
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this VM
 * supports.  Returns 1/0 for boolean capabilities, or a capability
 * specific value (e.g. a limit) where the API defines one.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		/* unconditionally available */
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		/* value is the maximum transfer size for KVM_S390_MEM_OP */
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write needs the ESOP machine facility */
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}
265
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400266static void kvm_s390_sync_dirty_log(struct kvm *kvm,
267 struct kvm_memory_slot *memslot)
268{
269 gfn_t cur_gfn, last_gfn;
270 unsigned long address;
271 struct gmap *gmap = kvm->arch.gmap;
272
273 down_read(&gmap->mm->mmap_sem);
274 /* Loop over all guest pages */
275 last_gfn = memslot->base_gfn + memslot->npages;
276 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
277 address = gfn_to_hva_memslot(memslot, cur_gfn);
278
279 if (gmap_test_and_clear_dirty(address, gmap))
280 mark_page_dirty(kvm, cur_gfn);
281 }
282 up_read(&gmap->mm->mmap_sem);
283}
284
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100285/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty bits out of the gmap into the slot's bitmap first */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
325
/*
 * KVM_ENABLE_CAP handler (VM scope): opt the VM in to optional
 * capabilities.  Returns 0 on success, -EINVAL for unknown caps,
 * non-zero flags, or caps the machine cannot provide.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* only available when the host has the vector facility */
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
365
/*
 * Read a KVM_S390_VM_MEM_CTRL attribute.  Currently only the memory
 * limit (end of the guest ASCE) can be queried; it is copied to the
 * user buffer at attr->addr.
 */
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.gmap->asce_end);
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
384
/*
 * Write a KVM_S390_VM_MEM_CTRL attribute: enable CMMA, reset CMMA
 * state, or change the guest memory limit.  CMMA enablement and limit
 * changes are only allowed while no VCPU exists yet (-EBUSY otherwise).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		/* resetting CMMA state only makes sense if it was enabled */
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot changes */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit may only shrink, never grow past the ASCE end */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in a fresh gmap with the new limit */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
455
Tony Krowiaka374e892014-09-03 10:13:53 +0200456static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
457
/*
 * Write a KVM_S390_VM_CRYPTO attribute: enable/disable AES or DEA key
 * wrapping.  Enabling generates a fresh random wrapping key mask;
 * disabling zeroes it.  Requires facility 76 (MSA extension 3).
 * Every VCPU is kicked out of SIE afterwards so the new crypto control
 * block settings take effect.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* force each VCPU to reload its crypto setup */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
506
Jason J. Herne72f25022014-11-25 09:46:02 -0500507static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
508{
509 u8 gtod_high;
510
511 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
512 sizeof(gtod_high)))
513 return -EFAULT;
514
515 if (gtod_high != 0)
516 return -EINVAL;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200517 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500518
519 return 0;
520}
521
522static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
523{
524 struct kvm_vcpu *cur_vcpu;
525 unsigned int vcpu_idx;
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200526 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500527
528 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
529 return -EFAULT;
530
Jason J. Herne72f25022014-11-25 09:46:02 -0500531 mutex_lock(&kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +0200532 preempt_disable();
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200533 kvm->arch.epoch = gtod - get_tod_clock();
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200534 kvm_s390_vcpu_block_all(kvm);
535 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
Jason J. Herne72f25022014-11-25 09:46:02 -0500536 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
Christian Borntraeger27406cd2015-04-14 12:17:34 +0200537 kvm_s390_vcpu_unblock_all(kvm);
Fan Zhangfdf03652015-05-13 10:58:41 +0200538 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500539 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200540 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500541 return 0;
542}
543
/*
 * Dispatch a KVM_S390_VM_TOD set operation to the high/low handlers.
 * attr->flags must be zero; unknown sub-attributes yield -ENXIO.
 */
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
564
565static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
566{
567 u8 gtod_high = 0;
568
569 if (copy_to_user((void __user *)attr->addr, &gtod_high,
570 sizeof(gtod_high)))
571 return -EFAULT;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200572 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500573
574 return 0;
575}
576
577static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
578{
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200579 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500580
Fan Zhangfdf03652015-05-13 10:58:41 +0200581 preempt_disable();
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200582 gtod = get_tod_clock() + kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +0200583 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -0500584 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
585 return -EFAULT;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200586 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500587
588 return 0;
589}
590
/*
 * Dispatch a KVM_S390_VM_TOD get operation to the high/low handlers.
 * attr->flags must be zero; unknown sub-attributes yield -ENXIO.
 */
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
611
/*
 * Set the guest CPU model (cpuid, ibc, facility list) from user space.
 * Only allowed while no VCPU has been created yet (-EBUSY otherwise).
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
641
/*
 * Dispatch a KVM_S390_VM_CPU_MODEL set operation.  Only the processor
 * data is writable; the machine data is read-only (-ENXIO).
 */
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}
653
/*
 * Copy the VM's current CPU model (cpuid, ibc, facility list) to the
 * user buffer at attr->addr.
 */
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
673
/*
 * Copy the host machine's CPU data (cpuid, ibc, KVM's facility mask and
 * the host facility list) to the user buffer at attr->addr.
 */
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	/* facilities KVM is willing to pass through */
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	/* facilities the host machine actually has (from STFLE) */
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
696
/*
 * Dispatch a KVM_S390_VM_CPU_MODEL get operation to the processor or
 * machine handler; unknown sub-attributes yield -ENXIO.
 */
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
711
/*
 * Top-level KVM_SET_DEVICE_ATTR dispatcher for the VM file descriptor:
 * route to the handler for the attribute group.
 */
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
736
737static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
738{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100739 int ret;
740
741 switch (attr->group) {
742 case KVM_S390_VM_MEM_CTRL:
743 ret = kvm_s390_get_mem_control(kvm, attr);
744 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500745 case KVM_S390_VM_TOD:
746 ret = kvm_s390_get_tod(kvm, attr);
747 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100748 case KVM_S390_VM_CPU_MODEL:
749 ret = kvm_s390_get_cpu_model(kvm, attr);
750 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100751 default:
752 ret = -ENXIO;
753 break;
754 }
755
756 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200757}
758
759static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
760{
761 int ret;
762
763 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200764 case KVM_S390_VM_MEM_CTRL:
765 switch (attr->attr) {
766 case KVM_S390_VM_MEM_ENABLE_CMMA:
767 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100768 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200769 ret = 0;
770 break;
771 default:
772 ret = -ENXIO;
773 break;
774 }
775 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500776 case KVM_S390_VM_TOD:
777 switch (attr->attr) {
778 case KVM_S390_VM_TOD_LOW:
779 case KVM_S390_VM_TOD_HIGH:
780 ret = 0;
781 break;
782 default:
783 ret = -ENXIO;
784 break;
785 }
786 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100787 case KVM_S390_VM_CPU_MODEL:
788 switch (attr->attr) {
789 case KVM_S390_VM_CPU_PROCESSOR:
790 case KVM_S390_VM_CPU_MACHINE:
791 ret = 0;
792 break;
793 default:
794 ret = -ENXIO;
795 break;
796 }
797 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200798 case KVM_S390_VM_CRYPTO:
799 switch (attr->attr) {
800 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
801 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
802 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
803 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
804 ret = 0;
805 break;
806 default:
807 ret = -ENXIO;
808 break;
809 }
810 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200811 default:
812 ret = -ENXIO;
813 break;
814 }
815
816 return ret;
817}
818
/*
 * KVM_S390_GET_SKEYS: read guest storage keys for args->count pages
 * starting at args->start_gfn and copy them to user space.
 *
 * Returns KVM_S390_GET_SKEYS_NONE if the guest never enabled storage
 * keys, 0 on success, or a negative error (-EINVAL for bad flags/count,
 * -ENOMEM, -EFAULT for bad gfn or user buffer, or an error from
 * get_guest_storage_key()).
 */
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first (quiet on failure), fall back to vmalloc */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	/* kvfree handles both the kmalloc and the vmalloc case */
	kvfree(keys);
	return r;
}
867
/*
 * KVM_S390_SET_SKEYS: copy args->count storage keys from user space and
 * install them on the guest pages starting at args->start_gfn. Enables
 * storage-key handling for the mm on first use via s390_enable_skey().
 *
 * Returns 0 on success; -EINVAL for bad flags/count or a key with the
 * reserved low-order bit set; -ENOMEM/-EFAULT on allocation or copy/gfn
 * failures; or an error from s390_enable_skey()/set_guest_storage_key().
 */
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	/* Try kmalloc first (quiet on failure), fall back to vmalloc */
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}
922
/*
 * Architecture-specific handler for ioctls issued on the VM file
 * descriptor. Each case copies its argument struct in from user space
 * (failing with -EFAULT) and forwards to the matching helper; unknown
 * ioctls return -ENOTTY so the generic code can report "not supported".
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* Inject a floating (VM-wide) interrupt */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		/* Only valid once the flic-style irqchip was enabled */
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
1007
/*
 * Issue PQAP(QCI) to query the Adjunct Processor (crypto) configuration
 * into the 128-byte buffer @config. The buffer is zeroed first so the
 * caller sees defined contents even if the instruction faults; the
 * EX_TABLE entry turns a faulting PQAP into a normal return with the
 * condition code left at 0. Returns the instruction's condition code.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* Function code 0x04 in bits 32-39 selects QCI */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
1029
1030static int kvm_s390_apxa_installed(void)
1031{
1032 u8 config[128];
1033 int cc;
1034
1035 if (test_facility(2) && test_facility(12)) {
1036 cc = kvm_s390_query_ap_config(config);
1037
1038 if (cc)
1039 pr_err("PQAP(QCI) failed with cc=%d", cc);
1040 else
1041 return config[0] & 0x40;
1042 }
1043
1044 return 0;
1045}
1046
1047static void kvm_s390_set_crycb_format(struct kvm *kvm)
1048{
1049 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1050
1051 if (kvm_s390_apxa_installed())
1052 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1053 else
1054 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1055}
1056
/*
 * Read the host CPU id and force the version byte to 0xff, the value
 * this code uses to present the id to guests.
 */
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
1062
/*
 * Allocate and initialize the VM's crypto control block (CRYCB).
 * A no-op (returning 0) when facility 76 (MSA extension 3) is absent.
 * Enables AES/DEA protected-key wrapping by default with randomly
 * generated wrapping key masks. Returns 0 or -ENOMEM.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	/* GFP_DMA: the CRYCB address must be below 2 GB for the SIE */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
1085
/*
 * Architecture-specific VM creation: validate the requested VM type,
 * enable SIE for the mm, and allocate the per-VM structures (SCA,
 * debug feature, facility page, crypto block, guest address space).
 * On any failure everything already allocated is released via out_err
 * (the free/unregister helpers tolerate NULL arguments).
 * Returns 0 or a negative error code.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* Only the plain or the user-controlled VM type is accepted */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger the SCA origin inside its page between VMs (16-byte
	 * steps, wrapping at 0x7f0); sca_offset is shared, hence the
	 * kvm_lock protection.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	/* Clamp the mask to the facilities KVM is willing to virtualize */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		/* user-controlled VMs manage their own gmap per vcpu */
		kvm->arch.gmap = NULL;
	} else {
		/* 44-bit guest address space limit */
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
1190
Christian Borntraegerd329c032008-11-26 14:50:27 +01001191void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1192{
1193 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001194 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001195 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001196 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +01001197 if (!kvm_is_ucontrol(vcpu->kvm)) {
1198 clear_bit(63 - vcpu->vcpu_id,
1199 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1200 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1201 (__u64) vcpu->arch.sie_block)
1202 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
1203 }
Carsten Otteabf4a712009-05-12 17:21:51 +02001204 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +01001205
1206 if (kvm_is_ucontrol(vcpu->kvm))
1207 gmap_free(vcpu->arch.gmap);
1208
Dominik Dingele6db1d62015-05-07 15:41:57 +02001209 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001210 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001211 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001212
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001213 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001214 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001215}
1216
/*
 * Destroy all vcpus of a VM, then clear the vcpu array and the online
 * count under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
1232
/*
 * Architecture-specific VM teardown: destroy all vcpus first, then
 * release the per-VM resources allocated in kvm_arch_init_vm()
 * (facility page, SCA, debug feature, crypto block, gmap, adapters,
 * floating interrupts).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	/* ucontrol VMs have no VM-wide gmap (see kvm_arch_init_vm) */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
1246
1247/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001248static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1249{
1250 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1251 if (!vcpu->arch.gmap)
1252 return -ENOMEM;
1253 vcpu->arch.gmap->private = vcpu->kvm;
1254
1255 return 0;
1256}
1257
/*
 * Generic per-vcpu initialization: invalidate the pfault token, reset
 * the async-pf queue, and advertise which register sets are synced via
 * the kvm_run area (vector regs only with facility 129). Delegates to
 * __kvm_ucontrol_vcpu_init() for user-controlled VMs.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
1276
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001277/*
1278 * Backs up the current FP/VX register save area on a particular
1279 * destination. Used to switch between different register save
1280 * areas.
1281 */
1282static inline void save_fpu_to(struct fpu *dst)
1283{
1284 dst->fpc = current->thread.fpu.fpc;
1285 dst->flags = current->thread.fpu.flags;
1286 dst->regs = current->thread.fpu.regs;
1287}
1288
1289/*
1290 * Switches the FP/VX register save area from which to lazy
1291 * restore register contents.
1292 */
1293static inline void load_fpu_from(struct fpu *from)
1294{
1295 current->thread.fpu.fpc = from->fpc;
1296 current->thread.fpu.flags = from->flags;
1297 current->thread.fpu.regs = from->regs;
1298}
1299
/*
 * Called when this vcpu is scheduled onto a host cpu: save the host's
 * FP/VX and access registers, install the guest's register save area
 * (the SIE block's vector area when facility 129 is present, otherwise
 * the separate guest_fpregs area), enable the guest address space and
 * mark the vcpu as running.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	save_fpu_to(&vcpu->arch.host_fpregs);

	if (test_kvm_facility(vcpu->kvm, 129)) {
		current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
		current->thread.fpu.flags = FPU_USE_VX;
		/*
		 * Use the register save area in the SIE-control block
		 * for register restore and save in kvm_arch_vcpu_put()
		 */
		current->thread.fpu.vxrs =
			(__vector128 *)&vcpu->run->s.regs.vrs;
		/* Always enable the vector extension for KVM */
		__ctl_set_vx();
	} else
		load_fpu_from(&vcpu->arch.guest_fpregs);

	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
1329
/*
 * Called when this vcpu is scheduled off a host cpu: mark it stopped,
 * disable the guest address space, save the guest's FP/VX and access
 * registers and restore the host's. Mirrors kvm_arch_vcpu_load().
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	save_fpu_regs();

	if (test_kvm_facility(vcpu->kvm, 129))
		/*
		 * kvm_arch_vcpu_load() set up the register save area to
		 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
		 * are already saved. Only the floating-point control must be
		 * copied.
		 */
		vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	else
		save_fpu_to(&vcpu->arch.guest_fpregs);
	load_fpu_from(&vcpu->arch.host_fpregs);

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1352
/*
 * Reset a vcpu to its architected initial state: clear PSW, prefix,
 * timers and control registers (then re-set the architected defaults
 * in cr0/cr14), clear the guest FPC (the lfpc also clears the host's
 * current FPC), reset the SIE breaking-event address and program
 * parameter, drop async-pf state and local interrupts, and stop the
 * cpu unless user space controls the cpu state itself.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1375
/*
 * Finish vcpu creation after the generic code made it visible: copy
 * the VM-wide TOD epoch into the SIE block (under kvm->lock with
 * preemption disabled so the epoch stays consistent) and, for
 * non-ucontrol VMs, attach the vcpu to the VM-wide gmap.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
1386
Tony Krowiak5102ee82014-06-27 14:46:01 -04001387static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1388{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001389 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001390 return;
1391
Tony Krowiaka374e892014-09-03 10:13:53 +02001392 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1393
1394 if (vcpu->kvm->arch.crypto.aes_kw)
1395 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1396 if (vcpu->kvm->arch.crypto.dea_kw)
1397 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1398
Tony Krowiak5102ee82014-06-27 14:46:01 -04001399 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1400}
1401
/*
 * Release the CMMA bitmap page (cbrlo) attached to this vcpu's SIE
 * block and clear the reference.
 */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
1407
/*
 * Allocate the CMMA bitmap page (cbrlo) for this vcpu and flip the
 * related ecb2 bits (set 0x80, clear 0x08 — presumably CMMA enable
 * and a conflicting interpretation bit; confirm against the SIE
 * control block documentation). Returns 0 or -ENOMEM.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1418
/*
 * Copy the VM-wide CPU model (cpu id, IBC value, facility list origin)
 * into this vcpu's state and SIE block.
 */
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	/* 31-bit address of the facility list page (see kvm_arch_init_vm) */
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}
1427
/*
 * Program the vcpu's SIE control block: initial cpuflags, guest
 * execution-control (ecb/ecb2/eca/ecd) bits gated on host facilities
 * and SCLP capabilities, interception controls for storage-key
 * instructions, the CMMA bitmap (when in use), the clock-comparator
 * wakeup timer and the crypto setup. Returns 0 or a negative error
 * from kvm_s390_vcpu_setup_cmma().
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	/* guest-storage-limit suppression, preferring the GED2 variant */
	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	/* ecb 0x10 only when both facilities 50 and 73 are present */
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* facility 129: vector support in eca/ecd */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	/* intercept the storage-key instructions until keys are in use */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1471
1472struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1473 unsigned int id)
1474{
Carsten Otte4d475552011-10-18 12:27:12 +02001475 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001476 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001477 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001478
Carsten Otte4d475552011-10-18 12:27:12 +02001479 if (id >= KVM_MAX_VCPUS)
1480 goto out;
1481
1482 rc = -ENOMEM;
1483
Michael Muellerb110fea2013-06-12 13:54:54 +02001484 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001485 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001486 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001487
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001488 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1489 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001490 goto out_free_cpu;
1491
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001492 vcpu->arch.sie_block = &sie_page->sie_block;
1493 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1494
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001495 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001496 if (!kvm_is_ucontrol(kvm)) {
1497 if (!kvm->arch.sca) {
1498 WARN_ON_ONCE(1);
1499 goto out_free_cpu;
1500 }
1501 if (!kvm->arch.sca->cpu[id].sda)
1502 kvm->arch.sca->cpu[id].sda =
1503 (__u64) vcpu->arch.sie_block;
1504 vcpu->arch.sie_block->scaoh =
1505 (__u32)(((__u64)kvm->arch.sca) >> 32);
1506 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1507 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1508 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001509
Carsten Otteba5c1e92008-03-25 18:47:26 +01001510 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001511 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001512 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001513 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001514
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001515 /*
1516 * Allocate a save area for floating-point registers. If the vector
1517 * extension is available, register contents are saved in the SIE
1518 * control block. The allocated save area is still required in
1519 * particular places, for example, in kvm_s390_vcpu_store_status().
1520 */
1521 vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
1522 GFP_KERNEL);
1523 if (!vcpu->arch.guest_fpregs.fprs) {
1524 rc = -ENOMEM;
1525 goto out_free_sie_block;
1526 }
1527
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001528 rc = kvm_vcpu_init(vcpu, kvm, id);
1529 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001530 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001531 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1532 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001533 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001534
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001535 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001536out_free_sie_block:
1537 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001538out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001539 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001540out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001541 return ERR_PTR(rc);
1542}
1543
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1548
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001549void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001550{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001551 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001552 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001553}
1554
Christian Borntraeger27406cd2015-04-14 12:17:34 +02001555void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001556{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001557 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001558}
1559
Christian Borntraeger8e236542015-04-09 13:49:04 +02001560static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1561{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001562 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02001563 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001564}
1565
1566static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1567{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04001568 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001569}
1570
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001571/*
1572 * Kick a guest cpu out of SIE and wait until SIE is not running.
1573 * If the CPU is not running (e.g. waiting as idle) the function will
1574 * return immediately. */
1575void exit_sie(struct kvm_vcpu *vcpu)
1576{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001577 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02001578 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1579 cpu_relax();
1580}
1581
/* Post a request and kick the vcpu out of SIE so it is handled synchronously. */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
1588
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001589static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1590{
1591 int i;
1592 struct kvm *kvm = gmap->private;
1593 struct kvm_vcpu *vcpu;
1594
1595 kvm_for_each_vcpu(i, vcpu, kvm) {
1596 /* match against both prefix pages */
Michael Muellerfda902c2014-05-13 16:58:30 +02001597 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001598 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
Christian Borntraeger8e236542015-04-09 13:49:04 +02001599 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001600 }
1601 }
1602}
1603
/*
 * Required by common KVM code but never invoked on s390; kicking is done
 * via exit_sie() instead.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1610
Carsten Otte14eebd92012-05-15 14:15:26 +02001611static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1612 struct kvm_one_reg *reg)
1613{
1614 int r = -EINVAL;
1615
1616 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001617 case KVM_REG_S390_TODPR:
1618 r = put_user(vcpu->arch.sie_block->todpr,
1619 (u32 __user *)reg->addr);
1620 break;
1621 case KVM_REG_S390_EPOCHDIFF:
1622 r = put_user(vcpu->arch.sie_block->epoch,
1623 (u64 __user *)reg->addr);
1624 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001625 case KVM_REG_S390_CPU_TIMER:
1626 r = put_user(vcpu->arch.sie_block->cputm,
1627 (u64 __user *)reg->addr);
1628 break;
1629 case KVM_REG_S390_CLOCK_COMP:
1630 r = put_user(vcpu->arch.sie_block->ckc,
1631 (u64 __user *)reg->addr);
1632 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001633 case KVM_REG_S390_PFTOKEN:
1634 r = put_user(vcpu->arch.pfault_token,
1635 (u64 __user *)reg->addr);
1636 break;
1637 case KVM_REG_S390_PFCOMPARE:
1638 r = put_user(vcpu->arch.pfault_compare,
1639 (u64 __user *)reg->addr);
1640 break;
1641 case KVM_REG_S390_PFSELECT:
1642 r = put_user(vcpu->arch.pfault_select,
1643 (u64 __user *)reg->addr);
1644 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001645 case KVM_REG_S390_PP:
1646 r = put_user(vcpu->arch.sie_block->pp,
1647 (u64 __user *)reg->addr);
1648 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001649 case KVM_REG_S390_GBEA:
1650 r = put_user(vcpu->arch.sie_block->gbea,
1651 (u64 __user *)reg->addr);
1652 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001653 default:
1654 break;
1655 }
1656
1657 return r;
1658}
1659
1660static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1661 struct kvm_one_reg *reg)
1662{
1663 int r = -EINVAL;
1664
1665 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001666 case KVM_REG_S390_TODPR:
1667 r = get_user(vcpu->arch.sie_block->todpr,
1668 (u32 __user *)reg->addr);
1669 break;
1670 case KVM_REG_S390_EPOCHDIFF:
1671 r = get_user(vcpu->arch.sie_block->epoch,
1672 (u64 __user *)reg->addr);
1673 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001674 case KVM_REG_S390_CPU_TIMER:
1675 r = get_user(vcpu->arch.sie_block->cputm,
1676 (u64 __user *)reg->addr);
1677 break;
1678 case KVM_REG_S390_CLOCK_COMP:
1679 r = get_user(vcpu->arch.sie_block->ckc,
1680 (u64 __user *)reg->addr);
1681 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001682 case KVM_REG_S390_PFTOKEN:
1683 r = get_user(vcpu->arch.pfault_token,
1684 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001685 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1686 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02001687 break;
1688 case KVM_REG_S390_PFCOMPARE:
1689 r = get_user(vcpu->arch.pfault_compare,
1690 (u64 __user *)reg->addr);
1691 break;
1692 case KVM_REG_S390_PFSELECT:
1693 r = get_user(vcpu->arch.pfault_select,
1694 (u64 __user *)reg->addr);
1695 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001696 case KVM_REG_S390_PP:
1697 r = get_user(vcpu->arch.sie_block->pp,
1698 (u64 __user *)reg->addr);
1699 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001700 case KVM_REG_S390_GBEA:
1701 r = get_user(vcpu->arch.sie_block->gbea,
1702 (u64 __user *)reg->addr);
1703 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001704 default:
1705 break;
1706 }
1707
1708 return r;
1709}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001710
/* Perform an initial CPU reset on behalf of user space. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1716
1717int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1718{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001719 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001720 return 0;
1721}
1722
1723int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1724{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01001725 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001726 return 0;
1727}
1728
1729int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1730 struct kvm_sregs *sregs)
1731{
Christian Borntraeger59674c12012-01-11 11:20:33 +01001732 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001733 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christian Borntraeger59674c12012-01-11 11:20:33 +01001734 restore_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001735 return 0;
1736}
1737
1738int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1739 struct kvm_sregs *sregs)
1740{
Christian Borntraeger59674c12012-01-11 11:20:33 +01001741 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001742 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001743 return 0;
1744}
1745
1746int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1747{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02001748 if (test_fp_ctl(fpu->fpc))
1749 return -EINVAL;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001750 memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Martin Schwidefsky4725c862013-10-15 16:08:34 +02001751 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001752 save_fpu_regs();
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001753 load_fpu_from(&vcpu->arch.guest_fpregs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001754 return 0;
1755}
1756
1757int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1758{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001759 memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001760 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001761 return 0;
1762}
1763
1764static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1765{
1766 int rc = 0;
1767
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001768 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001769 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001770 else {
1771 vcpu->run->psw_mask = psw.mask;
1772 vcpu->run->psw_addr = psw.addr;
1773 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001774 return rc;
1775}
1776
1777int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1778 struct kvm_translation *tr)
1779{
1780 return -EINVAL; /* not implemented yet */
1781}
1782
David Hildenbrand27291e22014-01-23 12:26:52 +01001783#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1784 KVM_GUESTDBG_USE_HW_BP | \
1785 KVM_GUESTDBG_ENABLE)
1786
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001787int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1788 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001789{
David Hildenbrand27291e22014-01-23 12:26:52 +01001790 int rc = 0;
1791
1792 vcpu->guest_debug = 0;
1793 kvm_s390_clear_bp_data(vcpu);
1794
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02001795 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01001796 return -EINVAL;
1797
1798 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1799 vcpu->guest_debug = dbg->control;
1800 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001801 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01001802
1803 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1804 rc = kvm_s390_import_bp_data(vcpu, dbg);
1805 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001806 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01001807 vcpu->arch.guestdbg.last_bp = 0;
1808 }
1809
1810 if (rc) {
1811 vcpu->guest_debug = 0;
1812 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001813 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01001814 }
1815
1816 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001817}
1818
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001819int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1820 struct kvm_mp_state *mp_state)
1821{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001822 /* CHECK_STOP and LOAD are not supported yet */
1823 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1824 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001825}
1826
1827int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1828 struct kvm_mp_state *mp_state)
1829{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001830 int rc = 0;
1831
1832 /* user space knows about this interface - let it control the state */
1833 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1834
1835 switch (mp_state->mp_state) {
1836 case KVM_MP_STATE_STOPPED:
1837 kvm_s390_vcpu_stop(vcpu);
1838 break;
1839 case KVM_MP_STATE_OPERATING:
1840 kvm_s390_vcpu_start(vcpu);
1841 break;
1842 case KVM_MP_STATE_LOAD:
1843 case KVM_MP_STATE_CHECK_STOP:
1844 /* fall through - CHECK_STOP and LOAD are not supported yet */
1845 default:
1846 rc = -ENXIO;
1847 }
1848
1849 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001850}
1851
David Hildenbrand8ad35752014-03-14 11:00:21 +01001852static bool ibs_enabled(struct kvm_vcpu *vcpu)
1853{
1854 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1855}
1856
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001857static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1858{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001859retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02001860 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02001861 if (!vcpu->requests)
1862 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001863 /*
1864 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1865 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1866 * This ensures that the ipte instruction for this request has
1867 * already finished. We might race against a second unmapper that
1868 * wants to set the blocking bit. Lets just retry the request loop.
1869 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01001870 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001871 int rc;
1872 rc = gmap_ipte_notify(vcpu->arch.gmap,
Michael Muellerfda902c2014-05-13 16:58:30 +02001873 kvm_s390_get_prefix(vcpu),
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001874 PAGE_SIZE * 2);
1875 if (rc)
1876 return rc;
David Hildenbrand8ad35752014-03-14 11:00:21 +01001877 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001878 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01001879
David Hildenbrandd3d692c2014-07-29 08:53:36 +02001880 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1881 vcpu->arch.sie_block->ihcpu = 0xffff;
1882 goto retry;
1883 }
1884
David Hildenbrand8ad35752014-03-14 11:00:21 +01001885 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1886 if (!ibs_enabled(vcpu)) {
1887 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001888 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01001889 &vcpu->arch.sie_block->cpuflags);
1890 }
1891 goto retry;
1892 }
1893
1894 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1895 if (ibs_enabled(vcpu)) {
1896 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001897 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01001898 &vcpu->arch.sie_block->cpuflags);
1899 }
1900 goto retry;
1901 }
1902
David Hildenbrand0759d062014-05-13 16:54:32 +02001903 /* nothing to do, just clear the request */
1904 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1905
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001906 return 0;
1907}
1908
Thomas Huthfa576c52014-05-06 17:20:16 +02001909/**
1910 * kvm_arch_fault_in_page - fault-in guest page if necessary
1911 * @vcpu: The corresponding virtual cpu
1912 * @gpa: Guest physical address
1913 * @writable: Whether the page should be writable or not
1914 *
1915 * Make sure that a guest page has been faulted-in on the host.
1916 *
1917 * Return: Zero on success, negative error code otherwise.
1918 */
1919long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001920{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001921 return gmap_fault(vcpu->arch.gmap, gpa,
1922 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001923}
1924
Dominik Dingel3c038e62013-10-07 17:11:48 +02001925static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1926 unsigned long token)
1927{
1928 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02001929 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001930
1931 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02001932 irq.u.ext.ext_params2 = token;
1933 irq.type = KVM_S390_INT_PFAULT_INIT;
1934 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02001935 } else {
1936 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02001937 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001938 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1939 }
1940}
1941
1942void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1943 struct kvm_async_pf *work)
1944{
1945 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1946 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1947}
1948
1949void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1950 struct kvm_async_pf *work)
1951{
1952 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1953 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1954}
1955
/* No-op on s390: the page is always injected directly into the guest. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1961
1962bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1963{
1964 /*
1965 * s390 will always inject the page directly,
1966 * but we still want check_async_completion to cleanup
1967 */
1968 return true;
1969}
1970
1971static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1972{
1973 hva_t hva;
1974 struct kvm_arch_async_pf arch;
1975 int rc;
1976
1977 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1978 return 0;
1979 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1980 vcpu->arch.pfault_compare)
1981 return 0;
1982 if (psw_extint_disabled(vcpu))
1983 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02001984 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02001985 return 0;
1986 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1987 return 0;
1988 if (!vcpu->arch.gmap->pfault_enabled)
1989 return 0;
1990
Heiko Carstens81480cc2014-01-01 16:36:07 +01001991 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1992 hva += current->thread.gmap_addr & ~PAGE_MASK;
1993 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02001994 return 0;
1995
1996 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1997 return rc;
1998}
1999
Thomas Huth3fb4c402013-09-12 10:33:43 +02002000static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002001{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002002 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002003
Dominik Dingel3c038e62013-10-07 17:11:48 +02002004 /*
2005 * On s390 notifications for arriving pages will be delivered directly
2006 * to the guest but the house keeping for completed pfaults is
2007 * handled outside the worker.
2008 */
2009 kvm_check_async_pf_completion(vcpu);
2010
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002011 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002012
2013 if (need_resched())
2014 schedule();
2015
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002016 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002017 s390_handle_mcck();
2018
Jens Freimann79395032014-04-17 10:10:30 +02002019 if (!kvm_is_ucontrol(vcpu->kvm)) {
2020 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2021 if (rc)
2022 return rc;
2023 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002024
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002025 rc = kvm_s390_handle_requests(vcpu);
2026 if (rc)
2027 return rc;
2028
David Hildenbrand27291e22014-01-23 12:26:52 +01002029 if (guestdbg_enabled(vcpu)) {
2030 kvm_s390_backup_guest_per_regs(vcpu);
2031 kvm_s390_patch_guest_per_regs(vcpu);
2032 }
2033
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002034 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002035 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2036 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2037 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002038
Thomas Huth3fb4c402013-09-12 10:33:43 +02002039 return 0;
2040}
2041
Thomas Huth492d8642015-02-10 16:11:01 +01002042static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2043{
2044 psw_t *psw = &vcpu->arch.sie_block->gpsw;
2045 u8 opcode;
2046 int rc;
2047
2048 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2049 trace_kvm_s390_sie_fault(vcpu);
2050
2051 /*
2052 * We want to inject an addressing exception, which is defined as a
2053 * suppressing or terminating exception. However, since we came here
2054 * by a DAT access exception, the PSW still points to the faulting
2055 * instruction since DAT exceptions are nullifying. So we've got
2056 * to look up the current opcode to get the length of the instruction
2057 * to be able to forward the PSW.
2058 */
Alexander Yarygin8ae04b82015-01-19 13:24:51 +03002059 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
Thomas Huth492d8642015-02-10 16:11:01 +01002060 if (rc)
2061 return kvm_s390_inject_prog_cond(vcpu, rc);
2062 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2063
2064 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2065}
2066
Thomas Huth3fb4c402013-09-12 10:33:43 +02002067static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2068{
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002069 int rc = -1;
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02002070
2071 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2072 vcpu->arch.sie_block->icptcode);
2073 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2074
David Hildenbrand27291e22014-01-23 12:26:52 +01002075 if (guestdbg_enabled(vcpu))
2076 kvm_s390_restore_guest_per_regs(vcpu);
2077
Thomas Huth3fb4c402013-09-12 10:33:43 +02002078 if (exit_reason >= 0) {
Martin Schwidefsky7c470532013-05-17 14:41:37 +02002079 rc = 0;
Thomas Huth210b16072013-09-19 16:26:18 +02002080 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2081 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2082 vcpu->run->s390_ucontrol.trans_exc_code =
2083 current->thread.gmap_addr;
2084 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2085 rc = -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002086
2087 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02002088 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002089 current->thread.gmap_pfault = 0;
Thomas Huthfa576c52014-05-06 17:20:16 +02002090 if (kvm_arch_setup_async_pf(vcpu)) {
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002091 rc = 0;
Thomas Huthfa576c52014-05-06 17:20:16 +02002092 } else {
2093 gpa_t gpa = current->thread.gmap_addr;
2094 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
2095 }
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002096 }
2097
Thomas Huth492d8642015-02-10 16:11:01 +01002098 if (rc == -1)
2099 rc = vcpu_post_run_fault_in_sie(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002100
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002101 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002102
Thomas Hutha76ccff2013-09-12 10:33:44 +02002103 if (rc == 0) {
2104 if (kvm_is_ucontrol(vcpu->kvm))
Christian Borntraeger2955c832014-03-06 16:01:38 +01002105 /* Don't exit for host interrupts. */
2106 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
Thomas Hutha76ccff2013-09-12 10:33:44 +02002107 else
2108 rc = kvm_handle_sie_intercept(vcpu);
2109 }
2110
Thomas Huth3fb4c402013-09-12 10:33:43 +02002111 return rc;
2112}
2113
2114static int __vcpu_run(struct kvm_vcpu *vcpu)
2115{
2116 int rc, exit_reason;
2117
Thomas Huth800c1062013-09-12 10:33:45 +02002118 /*
2119 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2120 * ning the guest), so that memslots (and other stuff) are protected
2121 */
2122 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2123
Thomas Hutha76ccff2013-09-12 10:33:44 +02002124 do {
2125 rc = vcpu_pre_run(vcpu);
2126 if (rc)
2127 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002128
Thomas Huth800c1062013-09-12 10:33:45 +02002129 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02002130 /*
2131 * As PF_VCPU will be used in fault handler, between
2132 * guest_enter and guest_exit should be no uaccess.
2133 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02002134 local_irq_disable();
2135 __kvm_guest_enter();
2136 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002137 exit_reason = sie64a(vcpu->arch.sie_block,
2138 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02002139 local_irq_disable();
2140 __kvm_guest_exit();
2141 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02002142 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002143
Thomas Hutha76ccff2013-09-12 10:33:44 +02002144 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01002145 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02002146
Thomas Huth800c1062013-09-12 10:33:45 +02002147 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01002148 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002149}
2150
David Hildenbrandb028ee32014-07-17 10:47:43 +02002151static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2152{
2153 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2154 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2155 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2156 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2157 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2158 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002159 /* some control register changes require a tlb flush */
2160 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002161 }
2162 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2163 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2164 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2165 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2166 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2167 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2168 }
2169 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2170 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2171 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2172 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002173 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2174 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02002175 }
2176 kvm_run->kvm_dirty_regs = 0;
2177}
2178
2179static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2180{
2181 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2182 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2183 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2184 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2185 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2186 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2187 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2188 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2189 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2190 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2191 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2192 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2193}
2194
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002195int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2196{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002197 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002198 sigset_t sigsaved;
2199
David Hildenbrand27291e22014-01-23 12:26:52 +01002200 if (guestdbg_exit_pending(vcpu)) {
2201 kvm_s390_prepare_debug_exit(vcpu);
2202 return 0;
2203 }
2204
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002205 if (vcpu->sigset_active)
2206 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2207
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002208 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2209 kvm_s390_vcpu_start(vcpu);
2210 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002211 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002212 vcpu->vcpu_id);
2213 return -EINVAL;
2214 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002215
David Hildenbrandb028ee32014-07-17 10:47:43 +02002216 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002217
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002218 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002219 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002220
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002221 if (signal_pending(current) && !rc) {
2222 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002223 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002224 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002225
David Hildenbrand27291e22014-01-23 12:26:52 +01002226 if (guestdbg_exit_pending(vcpu) && !rc) {
2227 kvm_s390_prepare_debug_exit(vcpu);
2228 rc = 0;
2229 }
2230
Heiko Carstensb8e660b2010-02-26 22:37:41 +01002231 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002232 /* intercept cannot be handled in-kernel, prepare kvm-run */
2233 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2234 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002235 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2236 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2237 rc = 0;
2238 }
2239
2240 if (rc == -EREMOTE) {
2241 /* intercept was handled, but userspace support is needed
2242 * kvm_run has been prepared by the handler */
2243 rc = 0;
2244 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002245
David Hildenbrandb028ee32014-07-17 10:47:43 +02002246 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002247
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002248 if (vcpu->sigset_active)
2249 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2250
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002251 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002252 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002253}
2254
/*
 * Store the vcpu's status (save area) at guest absolute address @gpa.
 * We have two special address values:
 * KVM_S390_STORE_STATUS_NOADDR: -> absolute SAVE_AREA_BASE (0x1200 on 64 bit)
 * KVM_S390_STORE_STATUS_PREFIXED: -> SAVE_AREA_BASE via the prefix mapping
 * Returns 0 on success, -EFAULT if any guest access fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag the architected mode in absolute lowcore byte 163 */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same flag, but through the real->absolute translation */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* rc accumulates failures; any non-zero result maps to -EFAULT */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the clock comparator is stored without its low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2302
/*
 * Store the vcpu status at guest address @addr while the vcpu's
 * floating point and access registers may still be live in the host
 * hardware: refresh the in-memory copies first, then delegate to
 * kvm_s390_store_status_unloaded().
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/*
		 * If the vector extension is available, the vector registers
		 * which overlaps with floating-point registers are saved in
		 * the SIE-control block. Hence, extract the floating-point
		 * registers and the FPC value and store them in the
		 * guest_fpregs structure.
		 */
		WARN_ON(!is_vx_task(current));	/* XXX remove later */
		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
				 current->thread.fpu.vxrs);
	} else
		save_fpu_to(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2329
/*
 * store additional status at address
 *
 * Writes the 512 bytes of vector register state to the guest absolute
 * address designated by @gpa (low 10 bits ignored). A zero address part
 * is treated as "no store requested" and succeeds without writing.
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}
2343
/*
 * Store the additional (vector) status at guest address @addr.
 * A no-op returning 0 when the vector facility (129) is not offered
 * to the guest.
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2361
/*
 * Clear any pending ENABLE_IBS request on @vcpu, then synchronously
 * request DISABLE_IBS (kicks the vcpu so the request is processed).
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
2367
2368static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2369{
2370 unsigned int i;
2371 struct kvm_vcpu *vcpu;
2372
2373 kvm_for_each_vcpu(i, vcpu, kvm) {
2374 __disable_ibs_on_vcpu(vcpu);
2375 }
2376}
2377
/*
 * Clear any pending DISABLE_IBS request on @vcpu, then synchronously
 * request ENABLE_IBS (mirror image of __disable_ibs_on_vcpu()).
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
2383
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002384void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2385{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002386 int i, online_vcpus, started_vcpus = 0;
2387
2388 if (!is_vcpu_stopped(vcpu))
2389 return;
2390
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002391 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002392 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002393 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002394 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2395
2396 for (i = 0; i < online_vcpus; i++) {
2397 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2398 started_vcpus++;
2399 }
2400
2401 if (started_vcpus == 0) {
2402 /* we're the only active VCPU -> speed it up */
2403 __enable_ibs_on_vcpu(vcpu);
2404 } else if (started_vcpus == 1) {
2405 /*
2406 * As we are starting a second VCPU, we have to disable
2407 * the IBS facility on all VCPUs to remove potentially
2408 * oustanding ENABLE requests.
2409 */
2410 __disable_ibs_on_all_vcpus(vcpu->kvm);
2411 }
2412
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002413 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002414 /*
2415 * Another VCPU might have used IBS while we were offline.
2416 * Let's play safe and flush the VCPU at startup.
2417 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002418 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002419 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002420 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002421}
2422
2423void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2424{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002425 int i, online_vcpus, started_vcpus = 0;
2426 struct kvm_vcpu *started_vcpu = NULL;
2427
2428 if (is_vcpu_stopped(vcpu))
2429 return;
2430
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002431 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002432 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002433 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002434 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2435
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002436 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02002437 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02002438
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002439 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002440 __disable_ibs_on_vcpu(vcpu);
2441
2442 for (i = 0; i < online_vcpus; i++) {
2443 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2444 started_vcpus++;
2445 started_vcpu = vcpu->kvm->vcpus[i];
2446 }
2447 }
2448
2449 if (started_vcpus == 1) {
2450 /*
2451 * As we only have one VCPU left, we want to enable the
2452 * IBS facility for that VCPU to speed it up.
2453 */
2454 __enable_ibs_on_vcpu(started_vcpu);
2455 }
2456
David Hildenbrand433b9ee2014-05-06 16:11:14 +02002457 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002458 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01002459}
2460
/*
 * Handle the KVM_ENABLE_CAP ioctl on a vcpu. Only
 * KVM_CAP_S390_CSS_SUPPORT is known here; enabling it is recorded
 * VM-wide in kvm->arch.css_support. Returns 0 on success, -EINVAL for
 * unknown capabilities or non-zero flags.
 */
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	/* no flags are defined for this ioctl */
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
2484
/*
 * Handle the KVM_S390_MEM_OP ioctl: read or write guest logical memory
 * through a bounce buffer, or (with KVM_S390_MEMOP_F_CHECK_ONLY) just
 * check accessibility of the range without transferring data.
 *
 * Returns 0 on success, a negative errno on failure; a positive return
 * from the guest access functions indicates a program exception, which
 * is injected when KVM_S390_MEMOP_F_INJECT_EXCEPTION is set.
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	/* reject unknown flags and oversized requests from userspace */
	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* the bounce buffer is only needed when data is actually moved */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* guest memory accesses need the srcu read lock (memslots) */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0: program exception data was stored in vcpu->arch.pgm */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2543
/*
 * Dispatch vcpu-scoped ioctls: interrupt injection, store status,
 * initial PSW/reset, one-reg access, ucontrol address space mappings,
 * guest memory operations and irq state save/restore.
 * Returns the handler's result, or -ENOTTY for unknown ioctls.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* older interface: converted to the kvm_s390_irq format */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* guest memory is written -> take the srcu read lock */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of irqs, at least one */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2698
/*
 * Fault handler for the vcpu's mmap area. Only user controlled
 * virtual machines may map the SIE control block at
 * KVM_S390_SIE_PAGE_OFFSET; every other access gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
2711
/* No arch specific memslot data needs to be allocated on s390. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2717
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	/* userspace address must be 1MB aligned (low 20 bits clear) */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	/* slot size must be a multiple of 1MB as well */
	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
2737
/*
 * Commit a memslot change by (re)mapping the affected range in the
 * guest address space (gmap). Failures are only logged; the memslot
 * change itself cannot be rolled back at this point.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
2763
/* Module init: register this architecture with the common KVM code. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2768
/* Module exit: unregister from the common KVM code. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2773
/* module entry and exit points */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");