/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

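/*
 * Probe one PERFORM LOCKED OPERATION function code: ORing 0x100 into the
 * function code in r0 turns the call into an availability test, and
 * condition code 0 means the function is installed. The result feeds the
 * "plo" subfunction bitmap that is reported to user space.
 */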
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

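/*
 * Propagate the dirty state tracked in the gmap into the KVM dirty bitmap
 * of the given memslot. The walk bails out early on a fatal signal and
 * calls cond_resched() so that large slots do not hog the CPU.
 */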
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

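/*
 * Kick every vcpu with KVM_REQ_ICPT_OPEREXC so that operation exceptions
 * get intercepted; this backs the KVM_CAP_S390_USER_INSTR0 capability,
 * which hands the 0x0000 instruction pattern to user space.
 */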
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
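
/*
 * Illustrative user-space call for the capabilities above (a sketch, not
 * part of this file): VM-wide caps are enabled via the KVM_ENABLE_CAP
 * ioctl on the VM file descriptor, e.g.:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 *
 * Caps that touch the facility lists must be enabled before the first
 * vcpu exists, as the -EBUSY paths above enforce.
 */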

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
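
/*
 * Illustrative user-space call (a sketch, not part of this file): the
 * memory-control attributes above are driven via KVM_SET_DEVICE_ATTR on
 * the VM file descriptor, e.g. to cap guest memory at 2G:
 *
 *	__u64 limit = 1ULL << 31;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)&limit,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Like CMMA enablement, the limit can only change while no vcpus exist.
 */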

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
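
/*
 * Taken together: migration mode is a VM-wide toggle. START (with CMMA in
 * use) allocates the pgste bitmap, marks every page of all memslots as
 * dirty and broadcasts KVM_REQ_START_MIGRATION; STOP broadcasts
 * KVM_REQ_STOP_MIGRATION and frees the bitmap; MIGRATION_STATUS merely
 * reports whether kvm->arch.migration_state exists.
 */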

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
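
/*
 * Note on the TOD interface above: kvm_s390_set_tod_clock() (defined later
 * in this file) is expected to convert the requested guest TOD value into
 * an epoch difference applied on top of the host TOD clock. Only the low
 * 64 TOD bits are settable here; the epoch extension must be zero, as
 * kvm_s390_set_tod_high() enforces.
 */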

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

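/*
 * User space proposes a CPU model; the IBC (instruction blocking control)
 * value is clamped against the machine's range from SCLP: the code below
 * treats (sclp.ibc >> 16) & 0xfff as the lowest supported IBC and
 * sclp.ibc & 0xfff as the highest unblocked one.
 */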
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

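/*
 * Note the asymmetry below: fac_mask is the set of facilities KVM can
 * offer to guests (kvm->arch.model.fac_mask), while fac_list is the raw
 * host facility list as reported by stfle.
 */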
1095static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1096{
1097 struct kvm_s390_vm_cpu_machine *mach;
1098 int ret = 0;
1099
1100 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1101 if (!mach) {
1102 ret = -ENOMEM;
1103 goto out;
1104 }
1105 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001106 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001107 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001108 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001109 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001110 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001111 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1112 kvm->arch.model.ibc,
1113 kvm->arch.model.cpuid);
1114 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1115 mach->fac_mask[0],
1116 mach->fac_mask[1],
1117 mach->fac_mask[2]);
1118 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1119 mach->fac_list[0],
1120 mach->fac_list[1],
1121 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001122 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1123 ret = -EFAULT;
1124 kfree(mach);
1125out:
1126 return ret;
1127}
1128
David Hildenbrand15c97052015-03-19 17:36:43 +01001129static int kvm_s390_get_processor_feat(struct kvm *kvm,
1130 struct kvm_device_attr *attr)
1131{
1132 struct kvm_s390_vm_cpu_feat data;
1133
1134 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1135 KVM_S390_VM_CPU_FEAT_NR_BITS);
1136 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1137 return -EFAULT;
1138 return 0;
1139}
1140
1141static int kvm_s390_get_machine_feat(struct kvm *kvm,
1142 struct kvm_device_attr *attr)
1143{
1144 struct kvm_s390_vm_cpu_feat data;
1145
1146 bitmap_copy((unsigned long *) data.feat,
1147 kvm_s390_available_cpu_feat,
1148 KVM_S390_VM_CPU_FEAT_NR_BITS);
1149 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1150 return -EFAULT;
1151 return 0;
1152}
1153
David Hildenbrand0a763c72016-05-18 16:03:47 +02001154static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1155 struct kvm_device_attr *attr)
1156{
1157 /*
1158 * Once we can actually configure subfunctions (kernel + hw support),
1159 * we have to check if they were already set by user space, if so copy
1160 * them from kvm->arch.
1161 */
1162 return -ENXIO;
1163}
1164
1165static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1166 struct kvm_device_attr *attr)
1167{
1168 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1169 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1170 return -EFAULT;
1171 return 0;
1172}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001173static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1174{
1175 int ret = -ENXIO;
1176
1177 switch (attr->attr) {
1178 case KVM_S390_VM_CPU_PROCESSOR:
1179 ret = kvm_s390_get_processor(kvm, attr);
1180 break;
1181 case KVM_S390_VM_CPU_MACHINE:
1182 ret = kvm_s390_get_machine(kvm, attr);
1183 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001184 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1185 ret = kvm_s390_get_processor_feat(kvm, attr);
1186 break;
1187 case KVM_S390_VM_CPU_MACHINE_FEAT:
1188 ret = kvm_s390_get_machine_feat(kvm, attr);
1189 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001190 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1191 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1192 break;
1193 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1194 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1195 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001196 }
1197 return ret;
1198}
1199
Dominik Dingelf2061652014-04-09 13:13:00 +02001200static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1201{
1202 int ret;
1203
1204 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001205 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001206 ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

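/*
 * Read guest storage keys for the gfn range in @args and copy them to
 * the user buffer at args->skeydata_addr; backend for the
 * KVM_S390_GET_SKEYS vm ioctl dispatched below. A rough userspace
 * sketch (illustrative only; vm_fd, npages and key_buf are assumed,
 * error handling is elided):
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = npages,
 *		.skeydata_addr = (__u64)(unsigned long)key_buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */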
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

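/*
 * Counterpart of kvm_s390_get_skeys(): copy storage keys from the user
 * buffer at args->skeydata_addr and apply them to the given gfn range,
 * enabling storage key handling for the guest first.
 */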
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	struct kvm_s390_migration_state *s = kvm->arch.migration_state;
	unsigned long bufsize, hva, pgstev, i, next, cur;
	int srcu_idx, peek, r = 0, rr;
	u8 *res;

	cur = args->start_gfn;
	i = next = pgstev = 0;

	if (unlikely(!kvm->arch.use_cmma))
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !s)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.use_cmma) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	if (!peek) {
		/* We are not peeking, and there are no dirty pages */
		if (!atomic64_read(&s->dirty_pages)) {
			memset(args, 0, sizeof(*args));
			return 0;
		}
		cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
				    args->start_gfn);
		if (cur >= s->bitmap_size)	/* nothing found, loop back */
			cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
		if (cur >= s->bitmap_size) {	/* again! (very unlikely) */
			memset(args, 0, sizeof(*args));
			return 0;
		}
		next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
	}

	res = vmalloc(bufsize);
	if (!res)
		return -ENOMEM;

	args->start_gfn = cur;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < bufsize) {
		hva = gfn_to_hva(kvm, cur);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}
		/* decrement only if we actually flipped the bit to 0 */
		if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
			atomic64_dec(&s->dirty_pages);
		r = get_pgste(kvm->mm, hva, &pgstev);
		if (r < 0)
			pgstev = 0;
		/* save the value */
		res[i++] = (pgstev >> 24) & 0x3;
		/*
		 * if the next bit is too far away, stop.
		 * if we reached the previous "next", find the next one
		 */
		if (!peek) {
			if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
				break;
			if (cur == next)
				next = find_next_bit(s->pgste_bitmap,
						     s->bitmap_size, cur + 1);
			/* reached the end of the bitmap or of the buffer, stop */
			if ((next >= s->bitmap_size) ||
			    (next >= args->start_gfn + bufsize))
				break;
		}
		cur++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);
	args->count = i;
	args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;

	rr = copy_to_user((void __user *)args->values, res, args->count);
	if (rr)
		r = -EFAULT;

	vfree(res);
	return r;
}

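/*
 * A rough sketch of the userspace side of KVM_S390_GET_CMMA_BITS
 * (illustrative only; vm_fd and buf are assumed, error handling is
 * elided). Unless KVM_S390_CMMA_PEEK is set, migration mode must have
 * been enabled via the KVM_S390_VM_MIGRATION attribute beforehand:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = buf_len,
 *		.flags = 0,
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *
 * On return, log.count holds the number of bytes stored and
 * log.remaining the number of dirty pages still to be fetched.
 */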
/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.use_cmma flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(sizeof(*bits) * args->count);
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.use_cmma) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.use_cmma = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

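/*
 * Dispatcher for ioctls issued on a vm file descriptor; anything not
 * handled here returns -ENOTTY (unknown ioctl).
 */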
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_get_cmma_bits(kvm, &args);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_set_cmma_bits(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

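/*
 * Query the AP configuration via PQAP(QCI): general register 0 takes
 * the QCI function code, general register 2 the address of the 128 byte
 * config area, and the condition code is extracted with ipm/srl. The
 * EX_TABLE fixup (an assumption about intent: guard against machines
 * where the instruction is not usable) lets a program check return
 * with the initialized cc of 0 and the zeroed config buffer.
 */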
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

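/*
 * The guest cpuid is derived from the host cpuid, with the version code
 * forced to 0xff, which on s390 marks a CPU as running under a
 * hypervisor.
 */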
static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

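/*
 * Create the architecture specific parts of a VM: the basic SCA (placed
 * at a staggered offset inside its page, presumably to spread cache
 * usage across VMs), the debug feature area, the facility mask and
 * list, crypto setup and, unless this is a ucontrol VM, the guest
 * address space (gmap).
 */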
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	if (kvm->arch.migration_state) {
		vfree(kvm->arch.migration_state->pgste_bitmap);
		kfree(kvm->arch.migration_state);
	}
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

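/*
 * Replace the basic SCA by an extended SCA when more vcpus are needed
 * than the basic format can hold. All vcpus are blocked and sca_lock is
 * taken for writing while the SIE control blocks are rewired, so no
 * vcpu can run SIE with a stale SCA pointer.
 */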
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

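/*
 * CPU timer accounting: while a vcpu is loaded and not idle, the host
 * TOD time elapsed since cputm_start is transparently subtracted from
 * the guest CPU timer value in the SIE control block. The seqcount
 * lets other threads read a consistent timer value through
 * kvm_s390_get_cpu_timer() without taking a lock.
 */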
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

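/*
 * Called by the gmap code when host mappings of guest memory are
 * invalidated. Only prefix (lowcore) pages matter here: every vcpu
 * whose 8k prefix area intersects [start, end] has to reload its MMU
 * state before reentering SIE.
 */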
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002490static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2491 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002492{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002493 struct kvm *kvm = gmap->private;
2494 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002495 unsigned long prefix;
2496 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002497
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002498 if (gmap_is_shadow(gmap))
2499 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002500 if (start >= 1UL << 31)
2501 /* We are only interested in prefix pages */
2502 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002503 kvm_for_each_vcpu(i, vcpu, kvm) {
2504 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002505 prefix = kvm_s390_get_prefix(vcpu);
2506 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2507 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2508 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002509 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002510 }
2511 }
2512}
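The interval check in the notifier is the standard closed-interval overlap test, specialized to the two prefix pages [prefix, prefix + 2*PAGE_SIZE - 1]. In generic form (a hypothetical helper, shown only for illustration):

static inline bool ranges_overlap(unsigned long a_start, unsigned long a_end,
				  unsigned long b_start, unsigned long b_end)
{
	/* Two closed intervals overlap iff each one starts no later
	 * than the other one ends. */
	return a_start <= b_end && b_start <= a_end;
}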
2513
Christoffer Dallb6d33832012-03-08 16:44:24 -05002514int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2515{
2516 /* kvm common code refers to this, but never calls it */
2517 BUG();
2518 return 0;
2519}
2520
Carsten Otte14eebd92012-05-15 14:15:26 +02002521static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2522 struct kvm_one_reg *reg)
2523{
2524 int r = -EINVAL;
2525
2526 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002527 case KVM_REG_S390_TODPR:
2528 r = put_user(vcpu->arch.sie_block->todpr,
2529 (u32 __user *)reg->addr);
2530 break;
2531 case KVM_REG_S390_EPOCHDIFF:
2532 r = put_user(vcpu->arch.sie_block->epoch,
2533 (u64 __user *)reg->addr);
2534 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002535 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002536 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002537 (u64 __user *)reg->addr);
2538 break;
2539 case KVM_REG_S390_CLOCK_COMP:
2540 r = put_user(vcpu->arch.sie_block->ckc,
2541 (u64 __user *)reg->addr);
2542 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002543 case KVM_REG_S390_PFTOKEN:
2544 r = put_user(vcpu->arch.pfault_token,
2545 (u64 __user *)reg->addr);
2546 break;
2547 case KVM_REG_S390_PFCOMPARE:
2548 r = put_user(vcpu->arch.pfault_compare,
2549 (u64 __user *)reg->addr);
2550 break;
2551 case KVM_REG_S390_PFSELECT:
2552 r = put_user(vcpu->arch.pfault_select,
2553 (u64 __user *)reg->addr);
2554 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002555 case KVM_REG_S390_PP:
2556 r = put_user(vcpu->arch.sie_block->pp,
2557 (u64 __user *)reg->addr);
2558 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002559 case KVM_REG_S390_GBEA:
2560 r = put_user(vcpu->arch.sie_block->gbea,
2561 (u64 __user *)reg->addr);
2562 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002563 default:
2564 break;
2565 }
2566
2567 return r;
2568}
2569
2570static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2571 struct kvm_one_reg *reg)
2572{
2573 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002574 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002575
2576 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002577 case KVM_REG_S390_TODPR:
2578 r = get_user(vcpu->arch.sie_block->todpr,
2579 (u32 __user *)reg->addr);
2580 break;
2581 case KVM_REG_S390_EPOCHDIFF:
2582 r = get_user(vcpu->arch.sie_block->epoch,
2583 (u64 __user *)reg->addr);
2584 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002585 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002586 r = get_user(val, (u64 __user *)reg->addr);
2587 if (!r)
2588 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002589 break;
2590 case KVM_REG_S390_CLOCK_COMP:
2591 r = get_user(vcpu->arch.sie_block->ckc,
2592 (u64 __user *)reg->addr);
2593 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002594 case KVM_REG_S390_PFTOKEN:
2595 r = get_user(vcpu->arch.pfault_token,
2596 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002597 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2598 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002599 break;
2600 case KVM_REG_S390_PFCOMPARE:
2601 r = get_user(vcpu->arch.pfault_compare,
2602 (u64 __user *)reg->addr);
2603 break;
2604 case KVM_REG_S390_PFSELECT:
2605 r = get_user(vcpu->arch.pfault_select,
2606 (u64 __user *)reg->addr);
2607 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002608 case KVM_REG_S390_PP:
2609 r = get_user(vcpu->arch.sie_block->pp,
2610 (u64 __user *)reg->addr);
2611 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002612 case KVM_REG_S390_GBEA:
2613 r = get_user(vcpu->arch.sie_block->gbea,
2614 (u64 __user *)reg->addr);
2615 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002616 default:
2617 break;
2618 }
2619
2620 return r;
2621}
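From userspace these accessors are reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, with the register selected by id and the value passed via a user pointer. A minimal sketch, assuming an already-created vcpu file descriptor (the helper name is hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_get_todpr(int vcpu_fd, uint32_t *todpr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_TODPR,
		.addr = (uint64_t)(unsigned long)todpr,
	};

	/* Returns 0 on success; the register value is written to *todpr. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}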
Christoffer Dallb6d33832012-03-08 16:44:24 -05002622
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002623static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2624{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002625 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002626 return 0;
2627}
2628
2629int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2630{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002631 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002632 return 0;
2633}
2634
2635int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2636{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002637 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002638 return 0;
2639}
2640
2641int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2642 struct kvm_sregs *sregs)
2643{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002644 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002645 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002646 return 0;
2647}
2648
2649int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2650 struct kvm_sregs *sregs)
2651{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002652 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002653 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002654 return 0;
2655}
2656
2657int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2658{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002659 if (test_fp_ctl(fpu->fpc))
2660 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002661 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002662 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002663 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2664 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002665 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002666 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002667 return 0;
2668}
2669
2670int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2671{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002672 /* make sure we have the latest values */
2673 save_fpu_regs();
2674 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002675 convert_vx_to_fp((freg_t *) fpu->fprs,
2676 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002677 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002678 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002679 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002680 return 0;
2681}
2682
2683static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2684{
2685 int rc = 0;
2686
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002687 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002688 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002689 else {
2690 vcpu->run->psw_mask = psw.mask;
2691 vcpu->run->psw_addr = psw.addr;
2692 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002693 return rc;
2694}
2695
2696int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2697 struct kvm_translation *tr)
2698{
2699 return -EINVAL; /* not implemented yet */
2700}
2701
David Hildenbrand27291e22014-01-23 12:26:52 +01002702#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2703 KVM_GUESTDBG_USE_HW_BP | \
2704 KVM_GUESTDBG_ENABLE)
2705
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002706int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2707 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002708{
David Hildenbrand27291e22014-01-23 12:26:52 +01002709 int rc = 0;
2710
2711 vcpu->guest_debug = 0;
2712 kvm_s390_clear_bp_data(vcpu);
2713
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002714 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002715 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002716 if (!sclp.has_gpere)
2717 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002718
2719 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2720 vcpu->guest_debug = dbg->control;
2721 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002722 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002723
2724 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2725 rc = kvm_s390_import_bp_data(vcpu, dbg);
2726 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002727 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002728 vcpu->arch.guestdbg.last_bp = 0;
2729 }
2730
2731 if (rc) {
2732 vcpu->guest_debug = 0;
2733 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002734 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002735 }
2736
2737 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002738}
2739
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002740int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2741 struct kvm_mp_state *mp_state)
2742{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002743 /* CHECK_STOP and LOAD are not supported yet */
2744 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2745 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002746}
2747
2748int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2749 struct kvm_mp_state *mp_state)
2750{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002751 int rc = 0;
2752
2753 /* user space knows about this interface - let it control the state */
2754 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2755
2756 switch (mp_state->mp_state) {
2757 case KVM_MP_STATE_STOPPED:
2758 kvm_s390_vcpu_stop(vcpu);
2759 break;
2760 case KVM_MP_STATE_OPERATING:
2761 kvm_s390_vcpu_start(vcpu);
2762 break;
2763 case KVM_MP_STATE_LOAD:
2764 case KVM_MP_STATE_CHECK_STOP:
2765 /* fall through - CHECK_STOP and LOAD are not supported yet */
2766 default:
2767 rc = -ENXIO;
2768 }
2769
2770 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002771}
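Userspace drives these transitions with the KVM_SET_MP_STATE vcpu ioctl; as the code above shows, the first such call also hands CPU state control to userspace. A minimal sketch (helper name hypothetical):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}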
2772
David Hildenbrand8ad35752014-03-14 11:00:21 +01002773static bool ibs_enabled(struct kvm_vcpu *vcpu)
2774{
2775 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2776}
2777
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002778static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2779{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002780retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002781 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02002782 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002783 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002784 /*
2785 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002786 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002787 * This ensures that the ipte instruction for this request has
2788 * already finished. We might race against a second unmapper that
2789 * wants to set the blocking bit. Let's just retry the request loop.
2790 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002791 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002792 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002793 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2794 kvm_s390_get_prefix(vcpu),
2795 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002796 if (rc) {
2797 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002798 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002799 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002800 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002801 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002802
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002803 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2804 vcpu->arch.sie_block->ihcpu = 0xffff;
2805 goto retry;
2806 }
2807
David Hildenbrand8ad35752014-03-14 11:00:21 +01002808 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2809 if (!ibs_enabled(vcpu)) {
2810 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002811 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002812 &vcpu->arch.sie_block->cpuflags);
2813 }
2814 goto retry;
2815 }
2816
2817 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2818 if (ibs_enabled(vcpu)) {
2819 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002820 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002821 &vcpu->arch.sie_block->cpuflags);
2822 }
2823 goto retry;
2824 }
2825
David Hildenbrand6502a342016-06-21 14:19:51 +02002826 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2827 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2828 goto retry;
2829 }
2830
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002831 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2832 /*
2833 * Disable CMMA virtualization; we will emulate the ESSA
2834 * instruction manually, in order to provide additional
2835 * functionality needed for live migration.
2836 */
2837 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2838 goto retry;
2839 }
2840
2841 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2842 /*
2843 * Re-enable CMMA virtualization if CMMA is available and
2844 * was used.
2845 */
2846 if ((vcpu->kvm->arch.use_cmma) &&
2847 (vcpu->kvm->mm->context.use_cmma))
2848 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2849 goto retry;
2850 }
2851
David Hildenbrand0759d062014-05-13 16:54:32 +02002852 /* nothing to do, just clear the request */
Radim Krčmář72875d8a2017-04-26 22:32:19 +02002853 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002854
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002855 return 0;
2856}
2857
David Hildenbrand25ed1672015-05-12 09:49:14 +02002858void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2859{
2860 struct kvm_vcpu *vcpu;
2861 int i;
2862
2863 mutex_lock(&kvm->lock);
2864 preempt_disable();
2865 kvm->arch.epoch = tod - get_tod_clock();
2866 kvm_s390_vcpu_block_all(kvm);
2867 kvm_for_each_vcpu(i, vcpu, kvm)
2868 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2869 kvm_s390_vcpu_unblock_all(kvm);
2870 preempt_enable();
2871 mutex_unlock(&kvm->lock);
2872}
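The epoch stored here is a delta, not an absolute time: hardware adds it to the host TOD clock while the guest runs, so guest_tod = host_tod + epoch. A sketch of the inverse computation (the helper is illustrative, not part of this file):

static inline u64 example_current_guest_tod(struct kvm *kvm)
{
	/* Mirrors kvm->arch.epoch = tod - get_tod_clock() above;
	 * wraparound is handled by unsigned 64-bit arithmetic. */
	return get_tod_clock() + kvm->arch.epoch;
}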
2873
Thomas Huthfa576c52014-05-06 17:20:16 +02002874/**
2875 * kvm_arch_fault_in_page - fault-in guest page if necessary
2876 * @vcpu: The corresponding virtual cpu
2877 * @gpa: Guest physical address
2878 * @writable: Whether the page should be writable or not
2879 *
2880 * Make sure that a guest page has been faulted-in on the host.
2881 *
2882 * Return: Zero on success, negative error code otherwise.
2883 */
2884long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002885{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002886 return gmap_fault(vcpu->arch.gmap, gpa,
2887 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002888}
2889
Dominik Dingel3c038e62013-10-07 17:11:48 +02002890static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2891 unsigned long token)
2892{
2893 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002894 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002895
2896 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002897 irq.u.ext.ext_params2 = token;
2898 irq.type = KVM_S390_INT_PFAULT_INIT;
2899 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002900 } else {
2901 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002902 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002903 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2904 }
2905}
2906
2907void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2908 struct kvm_async_pf *work)
2909{
2910 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2911 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2912}
2913
2914void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2915 struct kvm_async_pf *work)
2916{
2917 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2918 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2919}
2920
2921void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2922 struct kvm_async_pf *work)
2923{
2924 /* s390 will always inject the page directly */
2925}
2926
2927bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2928{
2929 /*
2930 * s390 will always inject the page directly,
2931 * but we still want check_async_completion to clean up
2932 */
2933 return true;
2934}
2935
2936static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2937{
2938 hva_t hva;
2939 struct kvm_arch_async_pf arch;
2940 int rc;
2941
2942 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2943 return 0;
2944 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2945 vcpu->arch.pfault_compare)
2946 return 0;
2947 if (psw_extint_disabled(vcpu))
2948 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002949 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002950 return 0;
2951 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2952 return 0;
2953 if (!vcpu->arch.gmap->pfault_enabled)
2954 return 0;
2955
Heiko Carstens81480cc2014-01-01 16:36:07 +01002956 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2957 hva += current->thread.gmap_addr & ~PAGE_MASK;
2958 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002959 return 0;
2960
2961 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2962 return rc;
2963}
2964
Thomas Huth3fb4c402013-09-12 10:33:43 +02002965static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002966{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002967 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002968
Dominik Dingel3c038e62013-10-07 17:11:48 +02002969 /*
2970 * On s390 notifications for arriving pages will be delivered directly
2971 * to the guest, but the housekeeping for completed pfaults is
2972 * handled outside the worker.
2973 */
2974 kvm_check_async_pf_completion(vcpu);
2975
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002976 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2977 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002978
2979 if (need_resched())
2980 schedule();
2981
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002982 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002983 s390_handle_mcck();
2984
Jens Freimann79395032014-04-17 10:10:30 +02002985 if (!kvm_is_ucontrol(vcpu->kvm)) {
2986 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2987 if (rc)
2988 return rc;
2989 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002990
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002991 rc = kvm_s390_handle_requests(vcpu);
2992 if (rc)
2993 return rc;
2994
David Hildenbrand27291e22014-01-23 12:26:52 +01002995 if (guestdbg_enabled(vcpu)) {
2996 kvm_s390_backup_guest_per_regs(vcpu);
2997 kvm_s390_patch_guest_per_regs(vcpu);
2998 }
2999
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003000 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003001 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3002 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3003 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003004
Thomas Huth3fb4c402013-09-12 10:33:43 +02003005 return 0;
3006}
3007
Thomas Huth492d8642015-02-10 16:11:01 +01003008static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3009{
David Hildenbrand56317922016-01-12 17:37:58 +01003010 struct kvm_s390_pgm_info pgm_info = {
3011 .code = PGM_ADDRESSING,
3012 };
3013 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003014 int rc;
3015
3016 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3017 trace_kvm_s390_sie_fault(vcpu);
3018
3019 /*
3020 * We want to inject an addressing exception, which is defined as a
3021 * suppressing or terminating exception. However, since we came here
3022 * by a DAT access exception, the PSW still points to the faulting
3023 * instruction, as DAT exceptions are nullifying. So we've got
3024 * to look up the current opcode to get the length of the instruction
3025 * to be able to forward the PSW.
3026 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003027 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003028 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003029 if (rc < 0) {
3030 return rc;
3031 } else if (rc) {
3032 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3033 * Forward by arbitrary ilc, injection will take care of
3034 * nullification if necessary.
3035 */
3036 pgm_info = vcpu->arch.pgm;
3037 ilen = 4;
3038 }
David Hildenbrand56317922016-01-12 17:37:58 +01003039 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3040 kvm_s390_forward_psw(vcpu, ilen);
3041 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003042}
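Forwarding the PSW relies on the architected encoding of the instruction length in the two most significant bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes. insn_length() can be written as a small bit trick consistent with that rule (a sketch; the in-tree helper lives in the s390 disassembler headers):

static inline int example_insn_length(unsigned char code)
{
	/* 0x00-0x3f -> 2, 0x40-0xbf -> 4, 0xc0-0xff -> 6 */
	return ((((int) code + 64) >> 7) + 1) << 1;
}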
3043
Thomas Huth3fb4c402013-09-12 10:33:43 +02003044static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3045{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003046 struct mcck_volatile_info *mcck_info;
3047 struct sie_page *sie_page;
3048
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003049 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3050 vcpu->arch.sie_block->icptcode);
3051 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3052
David Hildenbrand27291e22014-01-23 12:26:52 +01003053 if (guestdbg_enabled(vcpu))
3054 kvm_s390_restore_guest_per_regs(vcpu);
3055
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003056 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3057 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003058
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003059 if (exit_reason == -EINTR) {
3060 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3061 sie_page = container_of(vcpu->arch.sie_block,
3062 struct sie_page, sie_block);
3063 mcck_info = &sie_page->mcck_info;
3064 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3065 return 0;
3066 }
3067
David Hildenbrand71f116b2015-10-19 16:24:28 +02003068 if (vcpu->arch.sie_block->icptcode > 0) {
3069 int rc = kvm_handle_sie_intercept(vcpu);
3070
3071 if (rc != -EOPNOTSUPP)
3072 return rc;
3073 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3074 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3075 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3076 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3077 return -EREMOTE;
3078 } else if (exit_reason != -EFAULT) {
3079 vcpu->stat.exit_null++;
3080 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003081 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3082 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3083 vcpu->run->s390_ucontrol.trans_exc_code =
3084 current->thread.gmap_addr;
3085 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003086 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003087 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003088 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003089 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003090 if (kvm_arch_setup_async_pf(vcpu))
3091 return 0;
3092 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003093 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003094 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003095}
3096
3097static int __vcpu_run(struct kvm_vcpu *vcpu)
3098{
3099 int rc, exit_reason;
3100
Thomas Huth800c1062013-09-12 10:33:45 +02003101 /*
3102 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3103 * ning the guest), so that memslots (and other stuff) are protected
3104 */
3105 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3106
Thomas Hutha76ccff2013-09-12 10:33:44 +02003107 do {
3108 rc = vcpu_pre_run(vcpu);
3109 if (rc)
3110 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003111
Thomas Huth800c1062013-09-12 10:33:45 +02003112 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003113 /*
3114 * As PF_VCPU will be used in fault handler, between
3115 * guest_enter and guest_exit should be no uaccess.
3116 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003117 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003118 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003119 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003120 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003121 exit_reason = sie64a(vcpu->arch.sie_block,
3122 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003123 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003124 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003125 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003126 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003127 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003128
Thomas Hutha76ccff2013-09-12 10:33:44 +02003129 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003130 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003131
Thomas Huth800c1062013-09-12 10:33:45 +02003132 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003133 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003134}
3135
David Hildenbrandb028ee32014-07-17 10:47:43 +02003136static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3137{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003138 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003139 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003140
3141 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003142 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003143 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3144 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3145 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3146 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3147 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3148 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003149 /* some control register changes require a tlb flush */
3150 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003151 }
3152 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003153 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003154 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3155 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3156 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3157 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3158 }
3159 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3160 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3161 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3162 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003163 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3164 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003165 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003166 /*
3167 * If userspace sets the riccb (e.g. after migration) to a valid state,
3168 * we should enable RI here instead of doing the lazy enablement.
3169 */
3170 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003171 test_kvm_facility(vcpu->kvm, 64) &&
3172 riccb->valid &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003173 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003174 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003175 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003176 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003177 /*
3178 * If userspace sets the gscb (e.g. after migration) to non-zero,
3179 * we should enable GS here instead of doing the lazy enablement.
3180 */
3181 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3182 test_kvm_facility(vcpu->kvm, 133) &&
3183 gscb->gssm &&
3184 !vcpu->arch.gs_enabled) {
3185 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3186 vcpu->arch.sie_block->ecb |= ECB_GS;
3187 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3188 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003189 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003190 save_access_regs(vcpu->arch.host_acrs);
3191 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003192 /* save host (userspace) fprs/vrs */
3193 save_fpu_regs();
3194 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3195 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3196 if (MACHINE_HAS_VX)
3197 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3198 else
3199 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3200 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3201 if (test_fp_ctl(current->thread.fpu.fpc))
3202 /* User space provided an invalid FPC, let's clear it */
3203 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003204 if (MACHINE_HAS_GS) {
3205 preempt_disable();
3206 __ctl_set_bit(2, 4);
3207 if (current->thread.gs_cb) {
3208 vcpu->arch.host_gscb = current->thread.gs_cb;
3209 save_gs_cb(vcpu->arch.host_gscb);
3210 }
3211 if (vcpu->arch.gs_enabled) {
3212 current->thread.gs_cb = (struct gs_cb *)
3213 &vcpu->run->s.regs.gscb;
3214 restore_gs_cb(current->thread.gs_cb);
3215 }
3216 preempt_enable();
3217 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003218
David Hildenbrandb028ee32014-07-17 10:47:43 +02003219 kvm_run->kvm_dirty_regs = 0;
3220}
3221
3222static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3223{
3224 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3225 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3226 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3227 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003228 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003229 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3230 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3231 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3232 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3233 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3234 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3235 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003236 save_access_regs(vcpu->run->s.regs.acrs);
3237 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003238 /* Save guest register state */
3239 save_fpu_regs();
3240 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3241 /* Restore will be done lazily at return */
3242 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3243 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003244 if (MACHINE_HAS_GS) {
3245 __ctl_set_bit(2, 4);
3246 if (vcpu->arch.gs_enabled)
3247 save_gs_cb(current->thread.gs_cb);
3248 preempt_disable();
3249 current->thread.gs_cb = vcpu->arch.host_gscb;
3250 restore_gs_cb(vcpu->arch.host_gscb);
3251 preempt_enable();
3252 if (!vcpu->arch.host_gscb)
3253 __ctl_clear_bit(2, 4);
3254 vcpu->arch.host_gscb = NULL;
3255 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003256
David Hildenbrandb028ee32014-07-17 10:47:43 +02003257}
3258
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003259int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3260{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003261 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003262 sigset_t sigsaved;
3263
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003264 if (kvm_run->immediate_exit)
3265 return -EINTR;
3266
David Hildenbrand27291e22014-01-23 12:26:52 +01003267 if (guestdbg_exit_pending(vcpu)) {
3268 kvm_s390_prepare_debug_exit(vcpu);
3269 return 0;
3270 }
3271
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003272 if (vcpu->sigset_active)
3273 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3274
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003275 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3276 kvm_s390_vcpu_start(vcpu);
3277 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003278 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003279 vcpu->vcpu_id);
3280 return -EINVAL;
3281 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003282
David Hildenbrandb028ee32014-07-17 10:47:43 +02003283 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003284 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003285
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003286 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003287 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003288
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003289 if (signal_pending(current) && !rc) {
3290 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003291 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003292 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003293
David Hildenbrand27291e22014-01-23 12:26:52 +01003294 if (guestdbg_exit_pending(vcpu) && !rc) {
3295 kvm_s390_prepare_debug_exit(vcpu);
3296 rc = 0;
3297 }
3298
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003299 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003300 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003301 rc = 0;
3302 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003303
David Hildenbranddb0758b2016-02-15 09:42:25 +01003304 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003305 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003306
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003307 if (vcpu->sigset_active)
3308 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3309
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003310 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003311 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003312}
3313
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003314/*
3315 * store status at address
3316 * we have two special cases:
3317 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3318 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3319 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003320int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003321{
Carsten Otte092670c2011-07-24 10:48:22 +02003322 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003323 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003324 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003325 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003326 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003327
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003328 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003329 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3330 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003331 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003332 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003333 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3334 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003335 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003336 gpa = px;
3337 } else
3338 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003339
3340 /* manually convert vector registers if necessary */
3341 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003342 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003343 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3344 fprs, 128);
3345 } else {
3346 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003347 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003348 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003349 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003350 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003351 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003352 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003353 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003354 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003355 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003356 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003357 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003358 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003359 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003360 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003361 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003362 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003363 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003364 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003365 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003366 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003367 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003368 &vcpu->arch.sie_block->gcr, 128);
3369 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003370}
3371
Thomas Huthe8798922013-11-06 15:46:33 +01003372int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3373{
3374 /*
3375 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003376 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003377 * them into the save area
3378 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003379 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003380 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003381 save_access_regs(vcpu->run->s.regs.acrs);
3382
3383 return kvm_s390_store_status_unloaded(vcpu, addr);
3384}
3385
David Hildenbrand8ad35752014-03-14 11:00:21 +01003386static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3387{
3388 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003389 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003390}
3391
3392static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3393{
3394 unsigned int i;
3395 struct kvm_vcpu *vcpu;
3396
3397 kvm_for_each_vcpu(i, vcpu, kvm) {
3398 __disable_ibs_on_vcpu(vcpu);
3399 }
3400}
3401
3402static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3403{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003404 if (!sclp.has_ibs)
3405 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003406 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003407 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003408}
3409
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003410void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3411{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003412 int i, online_vcpus, started_vcpus = 0;
3413
3414 if (!is_vcpu_stopped(vcpu))
3415 return;
3416
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003417 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003418 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003419 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003420 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3421
3422 for (i = 0; i < online_vcpus; i++) {
3423 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3424 started_vcpus++;
3425 }
3426
3427 if (started_vcpus == 0) {
3428 /* we're the only active VCPU -> speed it up */
3429 __enable_ibs_on_vcpu(vcpu);
3430 } else if (started_vcpus == 1) {
3431 /*
3432 * As we are starting a second VCPU, we have to disable
3433 * the IBS facility on all VCPUs to remove potentially
3434 * outstanding ENABLE requests.
3435 */
3436 __disable_ibs_on_all_vcpus(vcpu->kvm);
3437 }
3438
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003439 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003440 /*
3441 * Another VCPU might have used IBS while we were offline.
3442 * Let's play it safe and flush the VCPU at startup.
3443 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003444 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003445 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003446 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003447}
3448
3449void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3450{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003451 int i, online_vcpus, started_vcpus = 0;
3452 struct kvm_vcpu *started_vcpu = NULL;
3453
3454 if (is_vcpu_stopped(vcpu))
3455 return;
3456
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003457 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003458 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003459 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003460 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3461
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003462 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003463 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003464
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003465 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003466 __disable_ibs_on_vcpu(vcpu);
3467
3468 for (i = 0; i < online_vcpus; i++) {
3469 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3470 started_vcpus++;
3471 started_vcpu = vcpu->kvm->vcpus[i];
3472 }
3473 }
3474
3475 if (started_vcpus == 1) {
3476 /*
3477 * As we only have one VCPU left, we want to enable the
3478 * IBS facility for that VCPU to speed it up.
3479 */
3480 __enable_ibs_on_vcpu(started_vcpu);
3481 }
3482
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003483 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003484 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003485}
3486
Cornelia Huckd6712df2012-12-20 15:32:11 +01003487static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3488 struct kvm_enable_cap *cap)
3489{
3490 int r;
3491
3492 if (cap->flags)
3493 return -EINVAL;
3494
3495 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003496 case KVM_CAP_S390_CSS_SUPPORT:
3497 if (!vcpu->kvm->arch.css_support) {
3498 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003499 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003500 trace_kvm_s390_enable_css(vcpu->kvm);
3501 }
3502 r = 0;
3503 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003504 default:
3505 r = -EINVAL;
3506 break;
3507 }
3508 return r;
3509}
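The matching userspace call is KVM_ENABLE_CAP on the vcpu fd; flags and args stay zero for this capability. A minimal sketch (helper name hypothetical):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}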
3510
Thomas Huth41408c22015-02-06 15:01:21 +01003511static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3512 struct kvm_s390_mem_op *mop)
3513{
3514 void __user *uaddr = (void __user *)mop->buf;
3515 void *tmpbuf = NULL;
3516 int r, srcu_idx;
3517 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3518 | KVM_S390_MEMOP_F_CHECK_ONLY;
3519
3520 if (mop->flags & ~supported_flags)
3521 return -EINVAL;
3522
3523 if (mop->size > MEM_OP_MAX_SIZE)
3524 return -E2BIG;
3525
3526 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3527 tmpbuf = vmalloc(mop->size);
3528 if (!tmpbuf)
3529 return -ENOMEM;
3530 }
3531
3532 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3533
3534 switch (mop->op) {
3535 case KVM_S390_MEMOP_LOGICAL_READ:
3536 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003537 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3538 mop->size, GACC_FETCH);
Thomas Huth41408c22015-02-06 15:01:21 +01003539 break;
3540 }
3541 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3542 if (r == 0) {
3543 if (copy_to_user(uaddr, tmpbuf, mop->size))
3544 r = -EFAULT;
3545 }
3546 break;
3547 case KVM_S390_MEMOP_LOGICAL_WRITE:
3548 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003549 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3550 mop->size, GACC_STORE);
Thomas Huth41408c22015-02-06 15:01:21 +01003551 break;
3552 }
3553 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3554 r = -EFAULT;
3555 break;
3556 }
3557 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3558 break;
3559 default:
3560 r = -EINVAL;
3561 }
3562
3563 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3564
3565 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3566 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3567
3568 vfree(tmpbuf);
3569 return r;
3570}
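Userspace reaches this path through the KVM_S390_MEM_OP vcpu ioctl. A minimal read sketch, assuming a vcpu fd and the struct kvm_s390_mem_op layout from <linux/kvm.h> (helper name hypothetical):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_read_guest(int vcpu_fd, uint64_t gaddr,
			      void *buf, uint32_t size)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = size,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (uint64_t)(unsigned long)buf,
		.ar    = 0,	/* address via access register 0 */
	};

	/* A return value > 0 is a program exception code; the caller can
	 * have it injected by retrying with
	 * KVM_S390_MEMOP_F_INJECT_EXCEPTION set. */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}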
3571
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003572long kvm_arch_vcpu_ioctl(struct file *filp,
3573 unsigned int ioctl, unsigned long arg)
3574{
3575 struct kvm_vcpu *vcpu = filp->private_data;
3576 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003577 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003578 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003579
Avi Kivity93736622010-05-13 12:35:17 +03003580 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003581 case KVM_S390_IRQ: {
3582 struct kvm_s390_irq s390irq;
3583
3584 r = -EFAULT;
3585 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3586 break;
3587 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3588 break;
3589 }
Avi Kivity93736622010-05-13 12:35:17 +03003590 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003591 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003592 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003593
Avi Kivity93736622010-05-13 12:35:17 +03003594 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003595 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003596 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003597 if (s390int_to_s390irq(&s390int, &s390irq))
3598 return -EINVAL;
3599 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003600 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003601 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003602 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003603 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003604 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003605 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003606 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003607 case KVM_S390_SET_INITIAL_PSW: {
3608 psw_t psw;
3609
Avi Kivitybc923cc2010-05-13 12:21:46 +03003610 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003611 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003612 break;
3613 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3614 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003615 }
3616 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003617 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3618 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003619 case KVM_SET_ONE_REG:
3620 case KVM_GET_ONE_REG: {
3621 struct kvm_one_reg reg;
3622 r = -EFAULT;
3623 if (copy_from_user(&reg, argp, sizeof(reg)))
3624 break;
3625 if (ioctl == KVM_SET_ONE_REG)
3626 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3627 else
3628 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3629 break;
3630 }
Carsten Otte27e03932012-01-04 10:25:21 +01003631#ifdef CONFIG_KVM_S390_UCONTROL
3632 case KVM_S390_UCAS_MAP: {
3633 struct kvm_s390_ucas_mapping ucasmap;
3634
3635 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3636 r = -EFAULT;
3637 break;
3638 }
3639
3640 if (!kvm_is_ucontrol(vcpu->kvm)) {
3641 r = -EINVAL;
3642 break;
3643 }
3644
3645 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3646 ucasmap.vcpu_addr, ucasmap.length);
3647 break;
3648 }
3649 case KVM_S390_UCAS_UNMAP: {
3650 struct kvm_s390_ucas_mapping ucasmap;
3651
3652 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3653 r = -EFAULT;
3654 break;
3655 }
3656
3657 if (!kvm_is_ucontrol(vcpu->kvm)) {
3658 r = -EINVAL;
3659 break;
3660 }
3661
3662 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3663 ucasmap.length);
3664 break;
3665 }
3666#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003667 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003668 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003669 break;
3670 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003671 case KVM_ENABLE_CAP:
3672 {
3673 struct kvm_enable_cap cap;
3674 r = -EFAULT;
3675 if (copy_from_user(&cap, argp, sizeof(cap)))
3676 break;
3677 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3678 break;
3679 }
Thomas Huth41408c22015-02-06 15:01:21 +01003680 case KVM_S390_MEM_OP: {
3681 struct kvm_s390_mem_op mem_op;
3682
3683 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3684 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3685 else
3686 r = -EFAULT;
3687 break;
3688 }
Jens Freimann816c7662014-11-24 17:13:46 +01003689 case KVM_S390_SET_IRQ_STATE: {
3690 struct kvm_s390_irq_state irq_state;
3691
3692 r = -EFAULT;
3693 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3694 break;
3695 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3696 irq_state.len == 0 ||
3697 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3698 r = -EINVAL;
3699 break;
3700 }
3701 r = kvm_s390_set_irq_state(vcpu,
3702 (void __user *) irq_state.buf,
3703 irq_state.len);
3704 break;
3705 }
3706 case KVM_S390_GET_IRQ_STATE: {
3707 struct kvm_s390_irq_state irq_state;
3708
3709 r = -EFAULT;
3710 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3711 break;
3712 if (irq_state.len == 0) {
3713 r = -EINVAL;
3714 break;
3715 }
3716 r = kvm_s390_get_irq_state(vcpu,
3717 (__u8 __user *) irq_state.buf,
3718 irq_state.len);
3719 break;
3720 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003721 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003722 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003723 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003724 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003725}
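As one example of the KVM_S390_IRQ path handled above, injecting an emergency signal from userspace only needs the interrupt type and the source CPU address. A sketch assuming the struct kvm_s390_irq layout from <linux/kvm.h> (helper name hypothetical):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_inject_emergency(int vcpu_fd, uint16_t src_cpu_addr)
{
	struct kvm_s390_irq irq;

	memset(&irq, 0, sizeof(irq));
	irq.type = KVM_S390_INT_EMERGENCY;
	irq.u.emerg.code = src_cpu_addr;

	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}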
3726
Carsten Otte5b1c1492012-01-04 10:25:23 +01003727int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3728{
3729#ifdef CONFIG_KVM_S390_UCONTROL
3730 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3731 && (kvm_is_ucontrol(vcpu->kvm))) {
3732 vmf->page = virt_to_page(vcpu->arch.sie_block);
3733 get_page(vmf->page);
3734 return 0;
3735 }
3736#endif
3737 return VM_FAULT_SIGBUS;
3738}
3739
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303740int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3741 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003742{
3743 return 0;
3744}
3745
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003746/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003747int kvm_arch_prepare_memory_region(struct kvm *kvm,
3748 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003749 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003750 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003751{
Nick Wangdd2887e2013-03-25 17:22:57 +01003752	/* A few sanity checks. Memory slots have to start and end on a
 3753	   segment boundary (1MB). The memory in userland may be fragmented
 3754	   into various different vmas. It is okay to mmap() and munmap()
 3755	   stuff in this slot after doing this call at any time. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003756
Carsten Otte598841c2011-07-24 10:48:21 +02003757 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003758 return -EINVAL;
3759
Carsten Otte598841c2011-07-24 10:48:21 +02003760 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003761 return -EINVAL;
3762
Dominik Dingela3a92c32014-12-01 17:24:42 +01003763 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3764 return -EINVAL;
3765
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003766 return 0;
3767}
3768
3769void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003770 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003771 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003772 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003773 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003774{
Carsten Ottef7850c92011-07-24 10:48:23 +02003775 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003776
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003777 /* If the basics of the memslot do not change, we do not want
3778 * to update the gmap. Every update causes several unnecessary
3779 * segment translation exceptions. This is usually handled just
3780 * fine by the normal fault handler + gmap, but it will also
3781 * cause faults on the prefix page of running guest CPUs.
3782 */
3783 if (old->userspace_addr == mem->userspace_addr &&
3784 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3785 old->npages * PAGE_SIZE == mem->memory_size)
3786 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003787
3788 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3789 mem->guest_phys_addr, mem->memory_size);
3790 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003791 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003792 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003793}
3794
Alexander Yarygin60a37702016-04-01 15:38:57 +03003795static inline unsigned long nonhyp_mask(int i)
3796{
3797 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3798
3799 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3800}
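nonhyp_mask() extracts the two-bit "facility indications without hypervisor" field for facility word i from sclp.hmfai and turns it into a mask: nonhyp_fai values 0, 1, 2 and 3 shift the 48-bit constant by 0, 16, 32 and 48 bits, leaving 48, 32, 16 or 0 usable facility bits. Worked example with a hypothetical hmfai value: for i == 0 and the two leftmost hmfai bits equal to 01b, nonhyp_fai == 1 and the mask is 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL, so only the low 32 bits of facility word 0 survive the AND in kvm_s390_init() below.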
3801
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003802void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3803{
3804 vcpu->valid_wakeup = false;
3805}
3806
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003807static int __init kvm_s390_init(void)
3808{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003809 int i;
3810
David Hildenbrand07197fd2015-01-30 16:01:38 +01003811 if (!sclp.has_sief2) {
3812 pr_info("SIE not available\n");
3813 return -ENODEV;
3814 }
3815
Alexander Yarygin60a37702016-04-01 15:38:57 +03003816 for (i = 0; i < 16; i++)
3817 kvm_s390_fac_list_mask[i] |=
3818 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3819
Michael Mueller9d8d5782015-02-02 15:42:51 +01003820 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003821}
3822
3823static void __exit kvm_s390_exit(void)
3824{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003825 kvm_exit();
3826}
3827
3828module_init(kvm_s390_init);
3829module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003830
3831/*
3832 * Enable autoloading of the kvm module.
3833 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3834 * since x86 takes a different approach.
3835 */
3836#include <linux/miscdevice.h>
3837MODULE_ALIAS_MISCDEV(KVM_MINOR);
3838MODULE_ALIAS("devname:kvm");