blob: c2683529b25c97dab521490896f0645105342a12 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010028#include <linux/vmalloc.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010029#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010030#include <asm/lowcore.h>
Fan Zhangfdf03652015-05-13 10:58:41 +020031#include <asm/etr.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010032#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010033#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010034#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020035#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020036#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010037#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010038#include "gaccess.h"
39
David Hildenbrandea2cdd22015-05-20 13:24:02 +020040#define KMSG_COMPONENT "kvm-s390"
41#undef pr_fmt
42#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
Cornelia Huck5786fff2012-07-23 17:20:29 +020044#define CREATE_TRACE_POINTS
45#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020046#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020047
Thomas Huth41408c282015-02-06 15:01:21 +010048#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010049#define LOCAL_IRQS 32
50#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010052
Heiko Carstensb0c632d2008-03-25 18:47:20 +010053#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
55struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020057 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010058 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010062 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Paolo Bonzinif7819512015-02-04 18:20:58 +010065 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
Paolo Bonzini62bea5b2015-09-15 18:27:57 +020066 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020067 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020068 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010069 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010070 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
71 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010072 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020073 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010074 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
75 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
76 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
77 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
78 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
79 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
80 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020081 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010082 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
83 { "instruction_spx", VCPU_STAT(instruction_spx) },
84 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
85 { "instruction_stap", VCPU_STAT(instruction_stap) },
86 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010087 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010088 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
89 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020090 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010091 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
92 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020093 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010094 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010095 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020096 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010097 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +020098 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
99 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100100 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200101 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
102 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Eric Farmancd7b4b62015-02-12 09:06:34 -0500103 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100104 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
105 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
106 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200107 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
108 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
109 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger388186b2011-10-30 15:17:03 +0100110 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +0100111 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +0200112 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Christian Borntraeger175a5c92015-07-07 15:19:32 +0200113 { "diagnose_258", VCPU_STAT(diagnose_258) },
114 { "diagnose_308", VCPU_STAT(diagnose_308) },
115 { "diagnose_500", VCPU_STAT(diagnose_500) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100116 { NULL }
117};
118
Michael Mueller9d8d5782015-02-02 15:42:51 +0100119/* upper facilities limit for kvm */
120unsigned long kvm_s390_fac_list_mask[] = {
Christian Borntraegera3ed8da2015-03-18 13:54:31 +0100121 0xffe6fffbfcfdfc40UL,
Guenther Hutzl53df84f2015-02-18 11:13:03 +0100122 0x005e800000000000UL,
Michael Mueller9d8d5782015-02-02 15:42:51 +0100123};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100124
Michael Mueller9d8d5782015-02-02 15:42:51 +0100125unsigned long kvm_s390_fac_list_mask_size(void)
Michael Mueller78c4b59f2013-07-26 15:04:04 +0200126{
Michael Mueller9d8d5782015-02-02 15:42:51 +0100127 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
128 return ARRAY_SIZE(kvm_s390_fac_list_mask);
Michael Mueller78c4b59f2013-07-26 15:04:04 +0200129}
130
Michael Mueller9d8d5782015-02-02 15:42:51 +0100131static struct gmap_notifier gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200132debug_info_t *kvm_s390_dbf;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100133
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100134/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200135int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100136{
137 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200138 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100139}
140
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200141static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
142
Fan Zhangfdf03652015-05-13 10:58:41 +0200143/*
144 * This callback is executed during stop_machine(). All CPUs are therefore
145 * temporarily stopped. In order not to change guest behavior, we have to
146 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
147 * so a CPU won't be stopped while calculating with the epoch.
148 */
149static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
150 void *v)
151{
152 struct kvm *kvm;
153 struct kvm_vcpu *vcpu;
154 int i;
155 unsigned long long *delta = v;
156
157 list_for_each_entry(kvm, &vm_list, vm_list) {
158 kvm->arch.epoch -= *delta;
159 kvm_for_each_vcpu(i, vcpu, kvm) {
160 vcpu->arch.sie_block->epoch -= *delta;
161 }
162 }
163 return NOTIFY_OK;
164}
165
166static struct notifier_block kvm_clock_notifier = {
167 .notifier_call = kvm_clock_sync,
168};
169
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100170int kvm_arch_hardware_setup(void)
171{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200172 gmap_notifier.notifier_call = kvm_gmap_notifier;
173 gmap_register_ipte_notifier(&gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200174 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
175 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100176 return 0;
177}
178
179void kvm_arch_hardware_unsetup(void)
180{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200181 gmap_unregister_ipte_notifier(&gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200182 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
183 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100184}
185
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100186int kvm_arch_init(void *opaque)
187{
Christian Borntraeger78f26132015-07-22 15:50:58 +0200188 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
189 if (!kvm_s390_dbf)
190 return -ENOMEM;
191
192 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
193 debug_unregister(kvm_s390_dbf);
194 return -ENOMEM;
195 }
196
Cornelia Huck84877d92014-09-02 10:27:35 +0100197 /* Register floating interrupt controller interface. */
198 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100199}
200
Christian Borntraeger78f26132015-07-22 15:50:58 +0200201void kvm_arch_exit(void)
202{
203 debug_unregister(kvm_s390_dbf);
204}
205
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100206/* Section: device related */
207long kvm_arch_dev_ioctl(struct file *filp,
208 unsigned int ioctl, unsigned long arg)
209{
210 if (ioctl == KVM_S390_ENABLE_SIE)
211 return s390_enable_sie();
212 return -EINVAL;
213}
214
Alexander Graf784aa3d2014-07-14 18:27:35 +0200215int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100216{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100217 int r;
218
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200219 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100220 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200221 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100222 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100223#ifdef CONFIG_KVM_S390_UCONTROL
224 case KVM_CAP_S390_UCONTROL:
225#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200226 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100227 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200228 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100229 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100230 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100231 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200232 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200233 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200234 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200235 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200236 case KVM_CAP_MP_STATE:
Jens Freimann47b43c52014-11-11 20:57:06 +0100237 case KVM_CAP_S390_INJECT_IRQ:
David Hildenbrand2444b352014-10-09 14:10:13 +0200238 case KVM_CAP_S390_USER_SIGP:
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100239 case KVM_CAP_S390_USER_STSI:
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400240 case KVM_CAP_S390_SKEYS:
Jens Freimann816c7662014-11-24 17:13:46 +0100241 case KVM_CAP_S390_IRQ_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100242 r = 1;
243 break;
Thomas Huth41408c282015-02-06 15:01:21 +0100244 case KVM_CAP_S390_MEM_OP:
245 r = MEM_OP_MAX_SIZE;
246 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200247 case KVM_CAP_NR_VCPUS:
248 case KVM_CAP_MAX_VCPUS:
249 r = KVM_MAX_VCPUS;
250 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100251 case KVM_CAP_NR_MEMSLOTS:
252 r = KVM_USER_MEM_SLOTS;
253 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200254 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100255 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200256 break;
Eric Farman68c55752014-06-09 10:57:26 -0400257 case KVM_CAP_S390_VECTOR_REGISTERS:
258 r = MACHINE_HAS_VX;
259 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200260 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100261 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200262 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100263 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100264}
265
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400266static void kvm_s390_sync_dirty_log(struct kvm *kvm,
267 struct kvm_memory_slot *memslot)
268{
269 gfn_t cur_gfn, last_gfn;
270 unsigned long address;
271 struct gmap *gmap = kvm->arch.gmap;
272
273 down_read(&gmap->mm->mmap_sem);
274 /* Loop over all guest pages */
275 last_gfn = memslot->base_gfn + memslot->npages;
276 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
277 address = gfn_to_hva_memslot(memslot, cur_gfn);
278
279 if (gmap_test_and_clear_dirty(address, gmap))
280 mark_page_dirty(kvm, cur_gfn);
281 }
282 up_read(&gmap->mm->mmap_sem);
283}
284
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100285/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200286static void sca_del_vcpu(struct kvm_vcpu *vcpu);
287
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100288/*
289 * Get (and clear) the dirty memory log for a memory slot.
290 */
291int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
292 struct kvm_dirty_log *log)
293{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400294 int r;
295 unsigned long n;
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200296 struct kvm_memslots *slots;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400297 struct kvm_memory_slot *memslot;
298 int is_dirty = 0;
299
300 mutex_lock(&kvm->slots_lock);
301
302 r = -EINVAL;
303 if (log->slot >= KVM_USER_MEM_SLOTS)
304 goto out;
305
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200306 slots = kvm_memslots(kvm);
307 memslot = id_to_memslot(slots, log->slot);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400308 r = -ENOENT;
309 if (!memslot->dirty_bitmap)
310 goto out;
311
312 kvm_s390_sync_dirty_log(kvm, memslot);
313 r = kvm_get_dirty_log(kvm, log, &is_dirty);
314 if (r)
315 goto out;
316
317 /* Clear the dirty log */
318 if (is_dirty) {
319 n = kvm_dirty_bitmap_bytes(memslot);
320 memset(memslot->dirty_bitmap, 0, n);
321 }
322 r = 0;
323out:
324 mutex_unlock(&kvm->slots_lock);
325 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100326}
327
Cornelia Huckd938dc52013-10-23 18:26:34 +0200328static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
329{
330 int r;
331
332 if (cap->flags)
333 return -EINVAL;
334
335 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200336 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200337 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200338 kvm->arch.use_irqchip = 1;
339 r = 0;
340 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200341 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200342 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200343 kvm->arch.user_sigp = 1;
344 r = 0;
345 break;
Eric Farman68c55752014-06-09 10:57:26 -0400346 case KVM_CAP_S390_VECTOR_REGISTERS:
David Hildenbrand5967c172015-11-06 12:08:48 +0100347 mutex_lock(&kvm->lock);
348 if (atomic_read(&kvm->online_vcpus)) {
349 r = -EBUSY;
350 } else if (MACHINE_HAS_VX) {
Michael Mueller18280d82015-03-16 16:05:41 +0100351 set_kvm_facility(kvm->arch.model.fac->mask, 129);
352 set_kvm_facility(kvm->arch.model.fac->list, 129);
353 r = 0;
354 } else
355 r = -EINVAL;
David Hildenbrand5967c172015-11-06 12:08:48 +0100356 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200357 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
358 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400359 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100360 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200361 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100362 kvm->arch.user_stsi = 1;
363 r = 0;
364 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200365 default:
366 r = -EINVAL;
367 break;
368 }
369 return r;
370}
371
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100372static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
373{
374 int ret;
375
376 switch (attr->attr) {
377 case KVM_S390_VM_MEM_LIMIT_SIZE:
378 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200379 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
380 kvm->arch.gmap->asce_end);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100381 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
382 ret = -EFAULT;
383 break;
384 default:
385 ret = -ENXIO;
386 break;
387 }
388 return ret;
389}
390
391static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200392{
393 int ret;
394 unsigned int idx;
395 switch (attr->attr) {
396 case KVM_S390_VM_MEM_ENABLE_CMMA:
Dominik Dingele6db1d62015-05-07 15:41:57 +0200397 /* enable CMMA only for z10 and later (EDAT_1) */
398 ret = -EINVAL;
399 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
400 break;
401
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200402 ret = -EBUSY;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200403 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200404 mutex_lock(&kvm->lock);
405 if (atomic_read(&kvm->online_vcpus) == 0) {
406 kvm->arch.use_cmma = 1;
407 ret = 0;
408 }
409 mutex_unlock(&kvm->lock);
410 break;
411 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingelc3489152015-06-18 13:17:11 +0200412 ret = -EINVAL;
413 if (!kvm->arch.use_cmma)
414 break;
415
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200416 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200417 mutex_lock(&kvm->lock);
418 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200419 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200420 srcu_read_unlock(&kvm->srcu, idx);
421 mutex_unlock(&kvm->lock);
422 ret = 0;
423 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100424 case KVM_S390_VM_MEM_LIMIT_SIZE: {
425 unsigned long new_limit;
426
427 if (kvm_is_ucontrol(kvm))
428 return -EINVAL;
429
430 if (get_user(new_limit, (u64 __user *)attr->addr))
431 return -EFAULT;
432
433 if (new_limit > kvm->arch.gmap->asce_end)
434 return -E2BIG;
435
436 ret = -EBUSY;
437 mutex_lock(&kvm->lock);
438 if (atomic_read(&kvm->online_vcpus) == 0) {
439 /* gmap_alloc will round the limit up */
440 struct gmap *new = gmap_alloc(current->mm, new_limit);
441
442 if (!new) {
443 ret = -ENOMEM;
444 } else {
445 gmap_free(kvm->arch.gmap);
446 new->private = kvm;
447 kvm->arch.gmap = new;
448 ret = 0;
449 }
450 }
451 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200452 VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100453 break;
454 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200455 default:
456 ret = -ENXIO;
457 break;
458 }
459 return ret;
460}
461
Tony Krowiaka374e892014-09-03 10:13:53 +0200462static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
463
464static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
465{
466 struct kvm_vcpu *vcpu;
467 int i;
468
Michael Mueller9d8d5782015-02-02 15:42:51 +0100469 if (!test_kvm_facility(kvm, 76))
Tony Krowiaka374e892014-09-03 10:13:53 +0200470 return -EINVAL;
471
472 mutex_lock(&kvm->lock);
473 switch (attr->attr) {
474 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
475 get_random_bytes(
476 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
477 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
478 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200479 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200480 break;
481 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
482 get_random_bytes(
483 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
484 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
485 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200486 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200487 break;
488 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
489 kvm->arch.crypto.aes_kw = 0;
490 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
491 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200492 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200493 break;
494 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
495 kvm->arch.crypto.dea_kw = 0;
496 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
497 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200498 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200499 break;
500 default:
501 mutex_unlock(&kvm->lock);
502 return -ENXIO;
503 }
504
505 kvm_for_each_vcpu(i, vcpu, kvm) {
506 kvm_s390_vcpu_crypto_setup(vcpu);
507 exit_sie(vcpu);
508 }
509 mutex_unlock(&kvm->lock);
510 return 0;
511}
512
Jason J. Herne72f25022014-11-25 09:46:02 -0500513static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
514{
515 u8 gtod_high;
516
517 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
518 sizeof(gtod_high)))
519 return -EFAULT;
520
521 if (gtod_high != 0)
522 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200523 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500524
525 return 0;
526}
527
528static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
529{
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200530 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500531
532 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
533 return -EFAULT;
534
David Hildenbrand25ed1672015-05-12 09:49:14 +0200535 kvm_s390_set_tod_clock(kvm, gtod);
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200536 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500537 return 0;
538}
539
540static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
541{
542 int ret;
543
544 if (attr->flags)
545 return -EINVAL;
546
547 switch (attr->attr) {
548 case KVM_S390_VM_TOD_HIGH:
549 ret = kvm_s390_set_tod_high(kvm, attr);
550 break;
551 case KVM_S390_VM_TOD_LOW:
552 ret = kvm_s390_set_tod_low(kvm, attr);
553 break;
554 default:
555 ret = -ENXIO;
556 break;
557 }
558 return ret;
559}
560
561static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
562{
563 u8 gtod_high = 0;
564
565 if (copy_to_user((void __user *)attr->addr, &gtod_high,
566 sizeof(gtod_high)))
567 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200568 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -0500569
570 return 0;
571}
572
573static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
574{
David Hildenbrand5a3d8832015-09-29 16:27:24 +0200575 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -0500576
David Hildenbrand60417fc2015-09-29 16:20:36 +0200577 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -0500578 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
579 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +0200580 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -0500581
582 return 0;
583}
584
585static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
586{
587 int ret;
588
589 if (attr->flags)
590 return -EINVAL;
591
592 switch (attr->attr) {
593 case KVM_S390_VM_TOD_HIGH:
594 ret = kvm_s390_get_tod_high(kvm, attr);
595 break;
596 case KVM_S390_VM_TOD_LOW:
597 ret = kvm_s390_get_tod_low(kvm, attr);
598 break;
599 default:
600 ret = -ENXIO;
601 break;
602 }
603 return ret;
604}
605
Michael Mueller658b6ed2015-02-02 15:49:35 +0100606static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
607{
608 struct kvm_s390_vm_cpu_processor *proc;
609 int ret = 0;
610
611 mutex_lock(&kvm->lock);
612 if (atomic_read(&kvm->online_vcpus)) {
613 ret = -EBUSY;
614 goto out;
615 }
616 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
617 if (!proc) {
618 ret = -ENOMEM;
619 goto out;
620 }
621 if (!copy_from_user(proc, (void __user *)attr->addr,
622 sizeof(*proc))) {
623 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
624 sizeof(struct cpuid));
625 kvm->arch.model.ibc = proc->ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100626 memcpy(kvm->arch.model.fac->list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +0100627 S390_ARCH_FAC_LIST_SIZE_BYTE);
628 } else
629 ret = -EFAULT;
630 kfree(proc);
631out:
632 mutex_unlock(&kvm->lock);
633 return ret;
634}
635
636static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
637{
638 int ret = -ENXIO;
639
640 switch (attr->attr) {
641 case KVM_S390_VM_CPU_PROCESSOR:
642 ret = kvm_s390_set_processor(kvm, attr);
643 break;
644 }
645 return ret;
646}
647
648static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
649{
650 struct kvm_s390_vm_cpu_processor *proc;
651 int ret = 0;
652
653 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
654 if (!proc) {
655 ret = -ENOMEM;
656 goto out;
657 }
658 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
659 proc->ibc = kvm->arch.model.ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100660 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100661 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
662 ret = -EFAULT;
663 kfree(proc);
664out:
665 return ret;
666}
667
668static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
669{
670 struct kvm_s390_vm_cpu_machine *mach;
671 int ret = 0;
672
673 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
674 if (!mach) {
675 ret = -ENOMEM;
676 goto out;
677 }
678 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +0200679 mach->ibc = sclp.ibc;
Michael Mueller981467c2015-02-24 13:51:04 +0100680 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
681 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100682 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +0100683 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +0100684 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
685 ret = -EFAULT;
686 kfree(mach);
687out:
688 return ret;
689}
690
691static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
692{
693 int ret = -ENXIO;
694
695 switch (attr->attr) {
696 case KVM_S390_VM_CPU_PROCESSOR:
697 ret = kvm_s390_get_processor(kvm, attr);
698 break;
699 case KVM_S390_VM_CPU_MACHINE:
700 ret = kvm_s390_get_machine(kvm, attr);
701 break;
702 }
703 return ret;
704}
705
Dominik Dingelf2061652014-04-09 13:13:00 +0200706static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
707{
708 int ret;
709
710 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200711 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100712 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200713 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500714 case KVM_S390_VM_TOD:
715 ret = kvm_s390_set_tod(kvm, attr);
716 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100717 case KVM_S390_VM_CPU_MODEL:
718 ret = kvm_s390_set_cpu_model(kvm, attr);
719 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200720 case KVM_S390_VM_CRYPTO:
721 ret = kvm_s390_vm_set_crypto(kvm, attr);
722 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200723 default:
724 ret = -ENXIO;
725 break;
726 }
727
728 return ret;
729}
730
731static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
732{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100733 int ret;
734
735 switch (attr->group) {
736 case KVM_S390_VM_MEM_CTRL:
737 ret = kvm_s390_get_mem_control(kvm, attr);
738 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500739 case KVM_S390_VM_TOD:
740 ret = kvm_s390_get_tod(kvm, attr);
741 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100742 case KVM_S390_VM_CPU_MODEL:
743 ret = kvm_s390_get_cpu_model(kvm, attr);
744 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100745 default:
746 ret = -ENXIO;
747 break;
748 }
749
750 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200751}
752
753static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
754{
755 int ret;
756
757 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200758 case KVM_S390_VM_MEM_CTRL:
759 switch (attr->attr) {
760 case KVM_S390_VM_MEM_ENABLE_CMMA:
761 case KVM_S390_VM_MEM_CLR_CMMA:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100762 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200763 ret = 0;
764 break;
765 default:
766 ret = -ENXIO;
767 break;
768 }
769 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500770 case KVM_S390_VM_TOD:
771 switch (attr->attr) {
772 case KVM_S390_VM_TOD_LOW:
773 case KVM_S390_VM_TOD_HIGH:
774 ret = 0;
775 break;
776 default:
777 ret = -ENXIO;
778 break;
779 }
780 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100781 case KVM_S390_VM_CPU_MODEL:
782 switch (attr->attr) {
783 case KVM_S390_VM_CPU_PROCESSOR:
784 case KVM_S390_VM_CPU_MACHINE:
785 ret = 0;
786 break;
787 default:
788 ret = -ENXIO;
789 break;
790 }
791 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200792 case KVM_S390_VM_CRYPTO:
793 switch (attr->attr) {
794 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
795 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
796 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
797 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
798 ret = 0;
799 break;
800 default:
801 ret = -ENXIO;
802 break;
803 }
804 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200805 default:
806 ret = -ENXIO;
807 break;
808 }
809
810 return ret;
811}
812
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400813static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
814{
815 uint8_t *keys;
816 uint64_t hva;
817 unsigned long curkey;
818 int i, r = 0;
819
820 if (args->flags != 0)
821 return -EINVAL;
822
823 /* Is this guest using storage keys? */
824 if (!mm_use_skey(current->mm))
825 return KVM_S390_GET_SKEYS_NONE;
826
827 /* Enforce sane limit on memory allocation */
828 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
829 return -EINVAL;
830
831 keys = kmalloc_array(args->count, sizeof(uint8_t),
832 GFP_KERNEL | __GFP_NOWARN);
833 if (!keys)
834 keys = vmalloc(sizeof(uint8_t) * args->count);
835 if (!keys)
836 return -ENOMEM;
837
838 for (i = 0; i < args->count; i++) {
839 hva = gfn_to_hva(kvm, args->start_gfn + i);
840 if (kvm_is_error_hva(hva)) {
841 r = -EFAULT;
842 goto out;
843 }
844
845 curkey = get_guest_storage_key(current->mm, hva);
846 if (IS_ERR_VALUE(curkey)) {
847 r = curkey;
848 goto out;
849 }
850 keys[i] = curkey;
851 }
852
853 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
854 sizeof(uint8_t) * args->count);
855 if (r)
856 r = -EFAULT;
857out:
858 kvfree(keys);
859 return r;
860}
861
862static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
863{
864 uint8_t *keys;
865 uint64_t hva;
866 int i, r = 0;
867
868 if (args->flags != 0)
869 return -EINVAL;
870
871 /* Enforce sane limit on memory allocation */
872 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
873 return -EINVAL;
874
875 keys = kmalloc_array(args->count, sizeof(uint8_t),
876 GFP_KERNEL | __GFP_NOWARN);
877 if (!keys)
878 keys = vmalloc(sizeof(uint8_t) * args->count);
879 if (!keys)
880 return -ENOMEM;
881
882 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
883 sizeof(uint8_t) * args->count);
884 if (r) {
885 r = -EFAULT;
886 goto out;
887 }
888
889 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +0200890 r = s390_enable_skey();
891 if (r)
892 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400893
894 for (i = 0; i < args->count; i++) {
895 hva = gfn_to_hva(kvm, args->start_gfn + i);
896 if (kvm_is_error_hva(hva)) {
897 r = -EFAULT;
898 goto out;
899 }
900
901 /* Lowest order bit is reserved */
902 if (keys[i] & 0x01) {
903 r = -EINVAL;
904 goto out;
905 }
906
907 r = set_guest_storage_key(current->mm, hva,
908 (unsigned long)keys[i], 0);
909 if (r)
910 goto out;
911 }
912out:
913 kvfree(keys);
914 return r;
915}
916
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100917long kvm_arch_vm_ioctl(struct file *filp,
918 unsigned int ioctl, unsigned long arg)
919{
920 struct kvm *kvm = filp->private_data;
921 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200922 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100923 int r;
924
925 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100926 case KVM_S390_INTERRUPT: {
927 struct kvm_s390_interrupt s390int;
928
929 r = -EFAULT;
930 if (copy_from_user(&s390int, argp, sizeof(s390int)))
931 break;
932 r = kvm_s390_inject_vm(kvm, &s390int);
933 break;
934 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200935 case KVM_ENABLE_CAP: {
936 struct kvm_enable_cap cap;
937 r = -EFAULT;
938 if (copy_from_user(&cap, argp, sizeof(cap)))
939 break;
940 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
941 break;
942 }
Cornelia Huck84223592013-07-15 13:36:01 +0200943 case KVM_CREATE_IRQCHIP: {
944 struct kvm_irq_routing_entry routing;
945
946 r = -EINVAL;
947 if (kvm->arch.use_irqchip) {
948 /* Set up dummy routing. */
949 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -0400950 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +0200951 }
952 break;
953 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200954 case KVM_SET_DEVICE_ATTR: {
955 r = -EFAULT;
956 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
957 break;
958 r = kvm_s390_vm_set_attr(kvm, &attr);
959 break;
960 }
961 case KVM_GET_DEVICE_ATTR: {
962 r = -EFAULT;
963 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
964 break;
965 r = kvm_s390_vm_get_attr(kvm, &attr);
966 break;
967 }
968 case KVM_HAS_DEVICE_ATTR: {
969 r = -EFAULT;
970 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
971 break;
972 r = kvm_s390_vm_has_attr(kvm, &attr);
973 break;
974 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400975 case KVM_S390_GET_SKEYS: {
976 struct kvm_s390_skeys args;
977
978 r = -EFAULT;
979 if (copy_from_user(&args, argp,
980 sizeof(struct kvm_s390_skeys)))
981 break;
982 r = kvm_s390_get_skeys(kvm, &args);
983 break;
984 }
985 case KVM_S390_SET_SKEYS: {
986 struct kvm_s390_skeys args;
987
988 r = -EFAULT;
989 if (copy_from_user(&args, argp,
990 sizeof(struct kvm_s390_skeys)))
991 break;
992 r = kvm_s390_set_skeys(kvm, &args);
993 break;
994 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100995 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300996 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100997 }
998
999 return r;
1000}
1001
Tony Krowiak45c9b472015-01-13 11:33:26 -05001002static int kvm_s390_query_ap_config(u8 *config)
1003{
1004 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001005 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001006
Christian Borntraeger86044c82015-02-26 13:53:47 +01001007 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05001008 asm volatile(
1009 "lgr 0,%1\n"
1010 "lgr 2,%2\n"
1011 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001012 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001013 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001014 "1:\n"
1015 EX_TABLE(0b, 1b)
1016 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001017 : "r" (fcn_code), "r" (config)
1018 : "cc", "0", "2", "memory"
1019 );
1020
1021 return cc;
1022}
1023
1024static int kvm_s390_apxa_installed(void)
1025{
1026 u8 config[128];
1027 int cc;
1028
1029 if (test_facility(2) && test_facility(12)) {
1030 cc = kvm_s390_query_ap_config(config);
1031
1032 if (cc)
1033 pr_err("PQAP(QCI) failed with cc=%d", cc);
1034 else
1035 return config[0] & 0x40;
1036 }
1037
1038 return 0;
1039}
1040
1041static void kvm_s390_set_crycb_format(struct kvm *kvm)
1042{
1043 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1044
1045 if (kvm_s390_apxa_installed())
1046 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1047 else
1048 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1049}
1050
Michael Mueller9d8d5782015-02-02 15:42:51 +01001051static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
1052{
1053 get_cpu_id(cpu_id);
1054 cpu_id->version = 0xff;
1055}
1056
Tony Krowiak5102ee82014-06-27 14:46:01 -04001057static int kvm_s390_crypto_init(struct kvm *kvm)
1058{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001059 if (!test_kvm_facility(kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001060 return 0;
1061
1062 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1063 GFP_KERNEL | GFP_DMA);
1064 if (!kvm->arch.crypto.crycb)
1065 return -ENOMEM;
1066
Tony Krowiak45c9b472015-01-13 11:33:26 -05001067 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001068
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001069 /* Enable AES/DEA protected key functions by default */
1070 kvm->arch.crypto.aes_kw = 1;
1071 kvm->arch.crypto.dea_kw = 1;
1072 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1073 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1074 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1075 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiaka374e892014-09-03 10:13:53 +02001076
Tony Krowiak5102ee82014-06-27 14:46:01 -04001077 return 0;
1078}
1079
Carsten Ottee08b9632012-01-04 10:25:20 +01001080int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001081{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001082 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001083 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001084 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001085
Carsten Ottee08b9632012-01-04 10:25:20 +01001086 rc = -EINVAL;
1087#ifdef CONFIG_KVM_S390_UCONTROL
1088 if (type & ~KVM_VM_S390_UCONTROL)
1089 goto out_err;
1090 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1091 goto out_err;
1092#else
1093 if (type)
1094 goto out_err;
1095#endif
1096
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001097 rc = s390_enable_sie();
1098 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001099 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001100
Carsten Otteb2904112011-10-18 12:27:13 +02001101 rc = -ENOMEM;
1102
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001103 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001104 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001105 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001106 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001107 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001108 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001109 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001110 kvm->arch.sca = (struct bsca_block *)
1111 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001112 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001113
1114 sprintf(debug_name, "kvm-%u", current->pid);
1115
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02001116 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001117 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001118 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001119
Michael Mueller9d8d5782015-02-02 15:42:51 +01001120 /*
1121 * The architectural maximum amount of facilities is 16 kbit. To store
1122 * this amount, 2 kbyte of memory is required. Thus we need a full
Michael Mueller981467c2015-02-24 13:51:04 +01001123 * page to hold the guest facility list (arch.model.fac->list) and the
1124 * facility mask (arch.model.fac->mask). Its address size has to be
Michael Mueller9d8d5782015-02-02 15:42:51 +01001125 * 31 bits and word aligned.
1126 */
1127 kvm->arch.model.fac =
Michael Mueller981467c2015-02-24 13:51:04 +01001128 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001129 if (!kvm->arch.model.fac)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001130 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001131
Michael Muellerfb5bf932015-02-27 14:25:10 +01001132 /* Populate the facility mask initially. */
Michael Mueller981467c2015-02-24 13:51:04 +01001133 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
Michael Mueller94422ee2015-02-26 12:12:40 +01001134 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001135 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1136 if (i < kvm_s390_fac_list_mask_size())
Michael Mueller981467c2015-02-24 13:51:04 +01001137 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001138 else
Michael Mueller981467c2015-02-24 13:51:04 +01001139 kvm->arch.model.fac->mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001140 }
1141
Michael Mueller981467c2015-02-24 13:51:04 +01001142 /* Populate the facility list initially. */
1143 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1144 S390_ARCH_FAC_LIST_SIZE_BYTE);
1145
Michael Mueller9d8d5782015-02-02 15:42:51 +01001146 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001147 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001148
Tony Krowiak5102ee82014-06-27 14:46:01 -04001149 if (kvm_s390_crypto_init(kvm) < 0)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001150 goto out_err;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001151
Carsten Otteba5c1e92008-03-25 18:47:26 +01001152 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001153 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1154 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001155 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001156 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001157
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001158 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001159 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001160
Carsten Ottee08b9632012-01-04 10:25:20 +01001161 if (type & KVM_VM_S390_UCONTROL) {
1162 kvm->arch.gmap = NULL;
1163 } else {
Christian Borntraeger03499852014-08-25 12:38:57 +02001164 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001165 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001166 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001167 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001168 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001169 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001170
1171 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001172 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001173 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001174
David Hildenbrand8ad35752014-03-14 11:00:21 +01001175 spin_lock_init(&kvm->arch.start_stop_lock);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001176 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001177
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001178 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001179out_err:
Dominik Dingel40f5b732015-03-12 13:55:53 +01001180 kfree(kvm->arch.crypto.crycb);
1181 free_page((unsigned long)kvm->arch.model.fac);
1182 debug_unregister(kvm->arch.dbf);
1183 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraeger78f26132015-07-22 15:50:58 +02001184 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001185 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001186}
1187
Christian Borntraegerd329c032008-11-26 14:50:27 +01001188void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1189{
1190 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001191 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001192 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001193 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001194 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001195 sca_del_vcpu(vcpu);
Carsten Otteabf4a712009-05-12 17:21:51 +02001196 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +01001197
1198 if (kvm_is_ucontrol(vcpu->kvm))
1199 gmap_free(vcpu->arch.gmap);
1200
Dominik Dingele6db1d62015-05-07 15:41:57 +02001201 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001202 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001203 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001204
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001205 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001206 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001207}
1208
1209static void kvm_free_vcpus(struct kvm *kvm)
1210{
1211 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001212 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001213
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001214 kvm_for_each_vcpu(i, vcpu, kvm)
1215 kvm_arch_vcpu_destroy(vcpu);
1216
1217 mutex_lock(&kvm->lock);
1218 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1219 kvm->vcpus[i] = NULL;
1220
1221 atomic_set(&kvm->online_vcpus, 0);
1222 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001223}
1224
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001225void kvm_arch_destroy_vm(struct kvm *kvm)
1226{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001227 kvm_free_vcpus(kvm);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001228 free_page((unsigned long)kvm->arch.model.fac);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001229 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +01001230 debug_unregister(kvm->arch.dbf);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001231 kfree(kvm->arch.crypto.crycb);
Carsten Otte27e03932012-01-04 10:25:21 +01001232 if (!kvm_is_ucontrol(kvm))
1233 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001234 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001235 kvm_s390_clear_float_irqs(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001236 KVM_EVENT(3, "vm 0x%p destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001237}
1238
1239/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001240static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1241{
1242 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1243 if (!vcpu->arch.gmap)
1244 return -ENOMEM;
1245 vcpu->arch.gmap->private = vcpu->kvm;
1246
1247 return 0;
1248}
1249
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001250static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1251{
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001252 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001253
1254 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1255 if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
1256 sca->cpu[vcpu->vcpu_id].sda = 0;
1257}
1258
1259static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
1260 unsigned int id)
1261{
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001262 struct bsca_block *sca = kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001263
1264 if (!sca->cpu[id].sda)
1265 sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
1266 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1267 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1268 set_bit_inv(id, (unsigned long *) &sca->mcn);
1269}
1270
1271static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1272{
1273 return id < KVM_MAX_VCPUS;
1274}
1275
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001276int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1277{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001278 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1279 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001280 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1281 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001282 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001283 KVM_SYNC_CRS |
1284 KVM_SYNC_ARCH0 |
1285 KVM_SYNC_PFAULT;
Eric Farman68c55752014-06-09 10:57:26 -04001286 if (test_kvm_facility(vcpu->kvm, 129))
1287 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001288
1289 if (kvm_is_ucontrol(vcpu->kvm))
1290 return __kvm_ucontrol_vcpu_init(vcpu);
1291
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001292 return 0;
1293}
1294
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001295/*
1296 * Backs up the current FP/VX register save area on a particular
1297 * destination. Used to switch between different register save
1298 * areas.
1299 */
1300static inline void save_fpu_to(struct fpu *dst)
1301{
1302 dst->fpc = current->thread.fpu.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001303 dst->regs = current->thread.fpu.regs;
1304}
1305
1306/*
1307 * Switches the FP/VX register save area from which to lazy
1308 * restore register contents.
1309 */
1310static inline void load_fpu_from(struct fpu *from)
1311{
1312 current->thread.fpu.fpc = from->fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001313 current->thread.fpu.regs = from->regs;
1314}
1315
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001316void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1317{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001318 /* Save host register state */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001319 save_fpu_regs();
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001320 save_fpu_to(&vcpu->arch.host_fpregs);
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001321
Michael Mueller18280d82015-03-16 16:05:41 +01001322 if (test_kvm_facility(vcpu->kvm, 129)) {
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001323 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001324 /*
1325 * Use the register save area in the SIE-control block
1326 * for register restore and save in kvm_arch_vcpu_put()
1327 */
1328 current->thread.fpu.vxrs =
1329 (__vector128 *)&vcpu->run->s.regs.vrs;
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001330 } else
1331 load_fpu_from(&vcpu->arch.guest_fpregs);
1332
1333 if (test_fp_ctl(current->thread.fpu.fpc))
Hendrik Brueckner96b2d7a2015-06-12 13:53:51 +02001334 /* User space provided an invalid FPC, let's clear it */
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001335 current->thread.fpu.fpc = 0;
1336
1337 save_access_regs(vcpu->arch.host_acrs);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001338 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001339 gmap_enable(vcpu->arch.gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001340 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001341}
1342
1343void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1344{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001345 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +02001346 gmap_disable(vcpu->arch.gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001347
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02001348 save_fpu_regs();
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001349
Michael Mueller18280d82015-03-16 16:05:41 +01001350 if (test_kvm_facility(vcpu->kvm, 129))
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001351 /*
1352 * kvm_arch_vcpu_load() set up the register save area to
1353 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
1354 * are already saved. Only the floating-point control must be
1355 * copied.
1356 */
1357 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Eric Farman68c55752014-06-09 10:57:26 -04001358 else
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001359 save_fpu_to(&vcpu->arch.guest_fpregs);
1360 load_fpu_from(&vcpu->arch.host_fpregs);
1361
1362 save_access_regs(vcpu->run->s.regs.acrs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001363 restore_access_regs(vcpu->arch.host_acrs);
1364}
1365
1366static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1367{
1368 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1369 vcpu->arch.sie_block->gpsw.mask = 0UL;
1370 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01001371 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001372 vcpu->arch.sie_block->cputm = 0UL;
1373 vcpu->arch.sie_block->ckc = 0UL;
1374 vcpu->arch.sie_block->todpr = 0;
1375 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1376 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1377 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1378 vcpu->arch.guest_fpregs.fpc = 0;
1379 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1380 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001381 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02001382 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1383 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001384 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1385 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01001386 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001387}
1388
Dominik Dingel31928aa2014-12-04 15:47:07 +01001389void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001390{
Jason J. Herne72f25022014-11-25 09:46:02 -05001391 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02001392 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001393 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02001394 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05001395 mutex_unlock(&vcpu->kvm->lock);
Dominik Dingeldafd0322014-12-02 16:53:21 +01001396 if (!kvm_is_ucontrol(vcpu->kvm))
1397 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02001398}
1399
Tony Krowiak5102ee82014-06-27 14:46:01 -04001400static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1401{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001402 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001403 return;
1404
Tony Krowiaka374e892014-09-03 10:13:53 +02001405 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1406
1407 if (vcpu->kvm->arch.crypto.aes_kw)
1408 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1409 if (vcpu->kvm->arch.crypto.dea_kw)
1410 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1411
Tony Krowiak5102ee82014-06-27 14:46:01 -04001412 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1413}
1414
Dominik Dingelb31605c2014-03-25 13:47:11 +01001415void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1416{
1417 free_page(vcpu->arch.sie_block->cbrlo);
1418 vcpu->arch.sie_block->cbrlo = 0;
1419}
1420
1421int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1422{
1423 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1424 if (!vcpu->arch.sie_block->cbrlo)
1425 return -ENOMEM;
1426
1427 vcpu->arch.sie_block->ecb2 |= 0x80;
1428 vcpu->arch.sie_block->ecb2 &= ~0x08;
1429 return 0;
1430}
1431
Michael Mueller91520f12015-02-27 14:32:11 +01001432static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1433{
1434 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1435
1436 vcpu->arch.cpu_id = model->cpu_id;
1437 vcpu->arch.sie_block->ibc = model->ibc;
1438 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1439}
1440
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001441int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1442{
Dominik Dingelb31605c2014-03-25 13:47:11 +01001443 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001444
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01001445 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1446 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001447 CPUSTAT_STOPPED);
1448
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001449 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001450 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01001451 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02001452 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02001453
Michael Mueller91520f12015-02-27 14:32:11 +01001454 kvm_s390_vcpu_setup_model(vcpu);
1455
Christian Borntraegerfc345312010-06-17 23:16:20 +02001456 vcpu->arch.sie_block->ecb = 6;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001457 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001458 vcpu->arch.sie_block->ecb |= 0x10;
1459
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +02001460 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrandea5f4962014-10-14 15:29:30 +02001461 vcpu->arch.sie_block->eca = 0xC1002000U;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001462 if (sclp.has_siif)
Heiko Carstens217a4402013-12-30 12:54:14 +01001463 vcpu->arch.sie_block->eca |= 1;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001464 if (sclp.has_sigpif)
David Hildenbrandea5f4962014-10-14 15:29:30 +02001465 vcpu->arch.sie_block->eca |= 0x10000000U;
Michael Mueller18280d82015-03-16 16:05:41 +01001466 if (test_kvm_facility(vcpu->kvm, 129)) {
Eric Farman13211ea2014-04-30 13:39:46 -04001467 vcpu->arch.sie_block->eca |= 0x00020000;
1468 vcpu->arch.sie_block->ecd |= 0x20000000;
1469 }
Thomas Huth492d8642015-02-10 16:11:01 +01001470 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05001471
Dominik Dingele6db1d62015-05-07 15:41:57 +02001472 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01001473 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1474 if (rc)
1475 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001476 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01001477 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02001478 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001479
Tony Krowiak5102ee82014-06-27 14:46:01 -04001480 kvm_s390_vcpu_crypto_setup(vcpu);
1481
Dominik Dingelb31605c2014-03-25 13:47:11 +01001482 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001483}
1484
1485struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1486 unsigned int id)
1487{
Carsten Otte4d475552011-10-18 12:27:12 +02001488 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001489 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001490 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001491
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001492 if (!sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02001493 goto out;
1494
1495 rc = -ENOMEM;
1496
Michael Muellerb110fea2013-06-12 13:54:54 +02001497 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001498 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001499 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001500
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001501 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1502 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001503 goto out_free_cpu;
1504
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001505 vcpu->arch.sie_block = &sie_page->sie_block;
1506 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1507
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001508 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001509 if (!kvm_is_ucontrol(kvm)) {
1510 if (!kvm->arch.sca) {
1511 WARN_ON_ONCE(1);
1512 goto out_free_cpu;
1513 }
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001514 sca_add_vcpu(vcpu, kvm, id);
Carsten Otte58f94602012-01-04 10:25:27 +01001515 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001516
Carsten Otteba5c1e92008-03-25 18:47:26 +01001517 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001518 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001519 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001520 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001521
Hendrik Brueckner9977e882015-06-10 12:53:42 +02001522 /*
1523 * Allocate a save area for floating-point registers. If the vector
1524 * extension is available, register contents are saved in the SIE
1525 * control block. The allocated save area is still required in
1526 * particular places, for example, in kvm_s390_vcpu_store_status().
1527 */
1528 vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
1529 GFP_KERNEL);
1530 if (!vcpu->arch.guest_fpregs.fprs) {
1531 rc = -ENOMEM;
1532 goto out_free_sie_block;
1533 }
1534
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001535 rc = kvm_vcpu_init(vcpu, kvm, id);
1536 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001537 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001538 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1539 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001540 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001541
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001542 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001543out_free_sie_block:
1544 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001545out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001546 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001547out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001548 return ERR_PTR(rc);
1549}
1550
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1555
/*
 * Prevent a vcpu from (re)entering SIE: set PROG_BLOCK_SIE in the
 * prog20 field and kick the vcpu out of SIE so the flag is observed.
 */
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1561
/* Counterpart of kvm_s390_vcpu_block(): allow the vcpu to enter SIE again. */
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1566
/*
 * Mark a synchronous request as pending in prog20 and kick the vcpu
 * out of SIE so the request is processed before the next entry.
 */
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}
1572
/* Clear the pending-request marker once the request has been processed. */
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
1577
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the hardware has actually left interpretation */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1588
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
1595
/*
 * gmap invalidation notifier: when the host page backing a vcpu's
 * prefix area is unmapped, force that vcpu to re-arm the notifier
 * via a synchronous MMU_RELOAD request.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
1610
/* s390 kicks vcpus via exit_sie(), so this generic hook must never run. */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1617
/*
 * KVM_GET_ONE_REG backend: copy the requested s390 register out to the
 * user buffer at reg->addr.  Unknown register ids yield -EINVAL.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1666
/*
 * KVM_SET_ONE_REG backend: copy the requested s390 register in from the
 * user buffer at reg->addr.  Setting an invalid pfault token also drains
 * the async page fault completion queue.  Unknown ids yield -EINVAL.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token disables pfault; drop stale completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001717
/* Perform an architected initial CPU reset on behalf of user space. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1723
/* Copy the general purpose registers from user space into the run area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1729
/* Copy the general purpose registers from the run area out to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1735
/*
 * Set access and control registers.  The access registers are reloaded
 * immediately so the host state matches the new values.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1744
/* Read access and control registers out to user space. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1752
/*
 * Set the guest floating point registers and FP control word.
 * The fpc is validated first; the host FPU state is saved before the
 * guest values are loaded into the hardware registers.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* reject reserved/invalid bits in the FP control word */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	save_fpu_regs();
	load_fpu_from(&vcpu->arch.guest_fpregs);
	return 0;
}
1763
/* Read the guest floating point registers and FP control word. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1770
1771static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1772{
1773 int rc = 0;
1774
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001775 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001776 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001777 else {
1778 vcpu->run->psw_mask = psw.mask;
1779 vcpu->run->psw_addr = psw.addr;
1780 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001781 return rc;
1782}
1783
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1789
/* guest-debug control bits user space may set via KVM_SET_GUEST_DEBUG */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)
1793
/*
 * Enable or disable guest debugging.  Enabling forces PER interpretation
 * (CPUSTAT_P) and optionally imports hardware breakpoints; any failure
 * rolls the vcpu back to the fully-disabled state.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean slate before applying the new settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* importing breakpoints failed - undo everything */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1825
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001826int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1827 struct kvm_mp_state *mp_state)
1828{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001829 /* CHECK_STOP and LOAD are not supported yet */
1830 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1831 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001832}
1833
1834int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1835 struct kvm_mp_state *mp_state)
1836{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001837 int rc = 0;
1838
1839 /* user space knows about this interface - let it control the state */
1840 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1841
1842 switch (mp_state->mp_state) {
1843 case KVM_MP_STATE_STOPPED:
1844 kvm_s390_vcpu_stop(vcpu);
1845 break;
1846 case KVM_MP_STATE_OPERATING:
1847 kvm_s390_vcpu_start(vcpu);
1848 break;
1849 case KVM_MP_STATE_LOAD:
1850 case KVM_MP_STATE_CHECK_STOP:
1851 /* fall through - CHECK_STOP and LOAD are not supported yet */
1852 default:
1853 rc = -ENXIO;
1854 }
1855
1856 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001857}
1858
/* Is interpretive-execution IBS (interlock-and-broadcast-suppression) on? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1863
/*
 * Process all pending vcpu requests before (re)entering SIE.  Each
 * handled request restarts the loop so that requests raised while one
 * is being processed are not lost.  Returns 0 on success or a negative
 * error code that aborts the SIE entry.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidating ihcpu forces a guest TLB flush on SIE entry */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1915
/*
 * Set the guest TOD clock for the whole VM by recording the difference
 * to the host TOD as the epoch.  All vcpus are blocked out of SIE while
 * their per-vcpu epoch is updated so the guest sees a consistent clock;
 * preemption is disabled so the host TOD read and the updates stay close.
 */
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
1931
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1947
/*
 * Inject a pseudo-page-fault token into the guest: an INIT interrupt on
 * the faulting vcpu when the fault starts, or a DONE interrupt on the VM
 * when the page has been made available.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1964
/* Async-pf hook: tell the guest the requested page is not yet present. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1971
/* Async-pf hook: tell the guest the previously missing page is available. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1978
/* Async-pf hook: nothing to do here on s390, injection happens directly. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1984
/* Always true: s390 injects directly but still wants cleanup to run. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1993
/*
 * Decide whether the current host fault may be handled asynchronously
 * (pseudo page fault) and, if so, queue the async work.  Each guard
 * clause checks one architectural precondition of the pfault facility;
 * any failed check falls back to synchronous fault handling (return 0).
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	/* the PSW must match the guest-configured pfault mask/compare */
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	/* CR0 bit 0x200 = external interrupt subclass for pfault */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
2022
/*
 * Prepare the vcpu for the next SIE entry: handle completed pfaults,
 * sync gprs 14/15 into the SIE block, service machine checks, deliver
 * pending interrupts and requests, and set up guest debugging.
 * Returns 0 if SIE may be entered, or a negative/exit code otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 shadow guest gprs 14 and 15 inside the SIE block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
2064
/*
 * Handle a host fault taken while interpreting a guest instruction:
 * inject an addressing exception and advance the PSW past the
 * instruction that caused the fault.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
2089
/*
 * Post-process a SIE exit.  Dispatches, in priority order: an
 * architected interception, a clean (non-fault) exit, a ucontrol fault
 * reflected to user space, a gmap pseudo/major page fault, and finally
 * a fault taken inside instruction interpretation.
 * Returns 0 to re-enter SIE, -EREMOTE to exit to user space, or a
 * negative error code.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	/* copy gprs 14/15 back from their SIE block shadows */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		/* unhandled intercepts are forwarded to user space */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		/* try async pfault first; fall back to synchronous fault-in */
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
2129
/*
 * Main vcpu execution loop: repeatedly prepare, enter SIE and
 * post-process until a signal, a guest-debug exit or an error/exit
 * code stops the loop.  kvm->srcu is held everywhere except while the
 * guest actually runs.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
2166
/*
 * Copy the register state user space marked dirty in kvm_run into the
 * vcpu/SIE block before entering the guest, then clear the dirty mask.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token disables pfault; drop stale completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
2194
/*
 * Copy the vcpu/SIE block register state back into kvm_run after
 * leaving the guest, so user space sees the current values.
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
2210
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002211int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2212{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002213 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002214 sigset_t sigsaved;
2215
David Hildenbrand27291e22014-01-23 12:26:52 +01002216 if (guestdbg_exit_pending(vcpu)) {
2217 kvm_s390_prepare_debug_exit(vcpu);
2218 return 0;
2219 }
2220
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002221 if (vcpu->sigset_active)
2222 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2223
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002224 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2225 kvm_s390_vcpu_start(vcpu);
2226 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02002227 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002228 vcpu->vcpu_id);
2229 return -EINVAL;
2230 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002231
David Hildenbrandb028ee32014-07-17 10:47:43 +02002232 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002233
Heiko Carstensdab4079d2009-06-12 10:26:32 +02002234 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02002235 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02002236
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002237 if (signal_pending(current) && !rc) {
2238 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002239 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02002240 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002241
David Hildenbrand27291e22014-01-23 12:26:52 +01002242 if (guestdbg_exit_pending(vcpu) && !rc) {
2243 kvm_s390_prepare_debug_exit(vcpu);
2244 rc = 0;
2245 }
2246
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002247 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02002248 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01002249 rc = 0;
2250 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002251
David Hildenbrandb028ee32014-07-17 10:47:43 +02002252 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002253
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002254 if (vcpu->sigset_active)
2255 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2256
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002257 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02002258 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002259}
2260
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architectured save area (FP regs, GP regs, PSW, prefix,
 * FPC, TOD programmable reg, CPU timer, clock comparator, access regs
 * and control regs) to guest absolute memory at @gpa. Caller must have
 * brought the relevant register state into vcpu->arch/vcpu->run first
 * (see kvm_s390_vcpu_store_status()).
 * Returns 0 on success, -EFAULT if any guest write fails.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag z/Arch mode at absolute address 163, use fixed area */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same, but the save area lives in the prefix page */
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* rc accumulates failures; any non-zero write result -> -EFAULT */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* clock comparator is stored without its low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
2308
/*
 * Store the VCPU status at guest address @addr, refreshing the lazily
 * held FPU and access register copies first so the stored image reflects
 * the current state.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/*
		 * If the vector extension is available, the vector registers
		 * which overlaps with floating-point registers are saved in
		 * the SIE-control block. Hence, extract the floating-point
		 * registers and the FPC value and store them in the
		 * guest_fpregs structure.
		 */
		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
				 current->thread.fpu.vxrs);
	} else
		save_fpu_to(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
2334
Eric Farmanbc17de72014-04-14 16:01:09 -04002335/*
2336 * store additional status at address
2337 */
2338int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2339 unsigned long gpa)
2340{
2341 /* Only bits 0-53 are used for address formation */
2342 if (!(gpa & ~0x3ff))
2343 return 0;
2344
2345 return write_guest_abs(vcpu, gpa & ~0x3ff,
2346 (void *)&vcpu->run->s.regs.vrs, 512);
2347}
2348
/*
 * Store the additional (vector register) status at guest address @addr.
 * A no-op returning 0 when the vector facility (129) is not available.
 */
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
2366
/*
 * Queue a DISABLE_IBS request for @vcpu. A still pending (unhandled)
 * ENABLE_IBS request is consumed first so the two opposite requests
 * cannot both be outstanding.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}
2372
2373static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2374{
2375 unsigned int i;
2376 struct kvm_vcpu *vcpu;
2377
2378 kvm_for_each_vcpu(i, vcpu, kvm) {
2379 __disable_ibs_on_vcpu(vcpu);
2380 }
2381}
2382
/*
 * Queue an ENABLE_IBS request for @vcpu, consuming any still pending
 * DISABLE_IBS request first (mirror of __disable_ibs_on_vcpu()).
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
2388
/*
 * Move @vcpu out of the STOPPED state. IBS is only kept enabled while a
 * single VCPU is running (see the comments below), so starting a second
 * one revokes it everywhere. Serialized against kvm_s390_vcpu_stop() by
 * the per-VM start_stop_lock.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count the VCPUs that are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	/* clearing CPUSTAT_STOPPED makes the VCPU runnable again */
	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2427
/*
 * Move @vcpu into the STOPPED state. If exactly one VCPU remains
 * running afterwards, IBS is enabled for it (see kvm_s390_vcpu_start()).
 * Serialized by the per-VM start_stop_lock.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find out whether exactly one VCPU is still running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
2465
Cornelia Huckd6712df2012-12-20 15:32:11 +01002466static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2467 struct kvm_enable_cap *cap)
2468{
2469 int r;
2470
2471 if (cap->flags)
2472 return -EINVAL;
2473
2474 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002475 case KVM_CAP_S390_CSS_SUPPORT:
2476 if (!vcpu->kvm->arch.css_support) {
2477 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02002478 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002479 trace_kvm_s390_enable_css(vcpu->kvm);
2480 }
2481 r = 0;
2482 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002483 default:
2484 r = -EINVAL;
2485 break;
2486 }
2487 return r;
2488}
2489
/*
 * Handle the KVM_S390_MEM_OP ioctl: read from or write to guest logical
 * memory on behalf of user space, or (with F_CHECK_ONLY) only verify that
 * the access would succeed without touching data.
 *
 * Data is staged through a kernel bounce buffer. A positive return value
 * indicates an access/program exception; with F_INJECT_EXCEPTION set it
 * is additionally injected into the guest.
 */
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	/* check-only mode needs no bounce buffer */
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	/* guest memory access requires holding the kvm->srcu read lock */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	/* r > 0 means a program exception was detected during the access */
	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
2548
/*
 * Dispatch the s390-specific per-VCPU ioctls. Returns the handler's
 * result, -EFAULT for bad user space pointers and -ENOTTY for unknown
 * ioctl numbers (so generic KVM code can try its own handlers).
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		/* inject an interrupt described by the extended irq format */
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		/* legacy format: convert to a kvm_s390_irq before injecting */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* storing status accesses guest memory -> take srcu */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* map a user address range into the ucontrol guest's gmap */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		/* the buffer must hold a whole number of irqs, at least one */
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2703
Carsten Otte5b1c1492012-01-04 10:25:23 +01002704int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2705{
2706#ifdef CONFIG_KVM_S390_UCONTROL
2707 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2708 && (kvm_is_ucontrol(vcpu->kvm))) {
2709 vmf->page = virt_to_page(vcpu->arch.sie_block);
2710 get_page(vmf->page);
2711 return 0;
2712 }
2713#endif
2714 return VM_FAULT_SIGBUS;
2715}
2716
/* No arch specific per-memslot data is needed on s390 -> always succeed. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2722
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002723/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002724int kvm_arch_prepare_memory_region(struct kvm *kvm,
2725 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02002726 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002727 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002728{
Nick Wangdd2887e2013-03-25 17:22:57 +01002729 /* A few sanity checks. We can have memory slots which have to be
2730 located/ended at a segment boundary (1MB). The memory in userland is
2731 ok to be fragmented into various different vmas. It is okay to mmap()
2732 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002733
Carsten Otte598841c2011-07-24 10:48:21 +02002734 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002735 return -EINVAL;
2736
Carsten Otte598841c2011-07-24 10:48:21 +02002737 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002738 return -EINVAL;
2739
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002740 return 0;
2741}
2742
/*
 * Called after a memslot change has been committed by generic code:
 * update the guest mapping (gmap) to reflect the new slot. Failures can
 * only be logged here since the slot is already in place.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
2768
/* Module entry point: register this architecture with common KVM code. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2773
/* Module exit point: unregister from common KVM code. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2778
/* Module registration. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");