/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

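/*
 * Wire up the gmap invalidation notifiers (one for regular guests, one
 * for vSIE shadow gmaps) and the TOD epoch delta notifier used above.
 */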
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

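/*
 * Probe a single PERFORM LOCKED OPERATION function code. Setting bit
 * 0x100 in r0 selects the "test bit" form of PLO, so no operation is
 * actually performed; cc == 0 means the function code is available.
 */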
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

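/*
 * Probe the host once at module load time: record the available PLO,
 * PTFF and CPACF (MSA) subfunctions, then translate host facilities and
 * SCLP features into the CPU features KVM can offer to guests.
 */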
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

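/*
 * Report which KVM capabilities are available. Capabilities that depend
 * on hardware (vector registers, runtime instrumentation, guarded
 * storage, ...) are derived from the host facility bits.
 */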
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

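/*
 * Transfer the dirty state from the guest page tables (gmap) to KVM's
 * per-memslot dirty bitmap, one guest page at a time.
 */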
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

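/* Request operation-exception interception on every existing vCPU. */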
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

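/*
 * Enable a VM capability. Capabilities that change the facility lists
 * (vector registers, RI, AIS, guarded storage) can only be enabled while
 * no vCPUs exist yet, hence the -EBUSY checks under kvm->lock.
 */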
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			kvm->arch.float_int.ais_enabled = 1;
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

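/*
 * Generate or clear the AES/DEA wrapping key masks in the CRYCB. The
 * CRYCB is referenced by the SIE control block, so every vCPU is kicked
 * out of SIE afterwards to make the new settings take effect.
 */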
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

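/*
 * Only a zero TOD epoch extension is accepted here; the epoch extension
 * (multiple-epoch facility) is not yet modeled for guests.
 */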
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

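/*
 * Set the guest CPU model (cpuid, IBC, facility list). The requested IBC
 * value is clamped against the range the machine reports via SCLP: at
 * most the unblocked IBC, at least the lowest supported IBC.
 */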
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

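/*
 * Dispatcher for KVM_SET_DEVICE_ATTR on the VM fd. As an illustrative
 * sketch (not part of this file), user space reaches these handlers
 * roughly like this:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64) &new_limit,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */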
Dominik Dingelf2061652014-04-09 13:13:00 +02001201static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1202{
1203 int ret;
1204
1205 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001206 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001207 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001208 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001209 case KVM_S390_VM_TOD:
1210 ret = kvm_s390_set_tod(kvm, attr);
1211 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001212 case KVM_S390_VM_CPU_MODEL:
1213 ret = kvm_s390_set_cpu_model(kvm, attr);
1214 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001215 case KVM_S390_VM_CRYPTO:
1216 ret = kvm_s390_vm_set_crypto(kvm, attr);
1217 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001218 case KVM_S390_VM_MIGRATION:
1219 ret = kvm_s390_vm_set_migration(kvm, attr);
1220 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001221 default:
1222 ret = -ENXIO;
1223 break;
1224 }
1225
1226 return ret;
1227}
1228
1229static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1230{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001231 int ret;
1232
1233 switch (attr->group) {
1234 case KVM_S390_VM_MEM_CTRL:
1235 ret = kvm_s390_get_mem_control(kvm, attr);
1236 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001237 case KVM_S390_VM_TOD:
1238 ret = kvm_s390_get_tod(kvm, attr);
1239 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001240 case KVM_S390_VM_CPU_MODEL:
1241 ret = kvm_s390_get_cpu_model(kvm, attr);
1242 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001243 case KVM_S390_VM_MIGRATION:
1244 ret = kvm_s390_vm_get_migration(kvm, attr);
1245 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001246 default:
1247 ret = -ENXIO;
1248 break;
1249 }
1250
1251 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001252}
1253
1254static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1255{
1256 int ret;
1257
1258 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001259 case KVM_S390_VM_MEM_CTRL:
1260 switch (attr->attr) {
1261 case KVM_S390_VM_MEM_ENABLE_CMMA:
1262 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001263 ret = sclp.has_cmma ? 0 : -ENXIO;
1264 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001265 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001266 ret = 0;
1267 break;
1268 default:
1269 ret = -ENXIO;
1270 break;
1271 }
1272 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001273 case KVM_S390_VM_TOD:
1274 switch (attr->attr) {
1275 case KVM_S390_VM_TOD_LOW:
1276 case KVM_S390_VM_TOD_HIGH:
1277 ret = 0;
1278 break;
1279 default:
1280 ret = -ENXIO;
1281 break;
1282 }
1283 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001284 case KVM_S390_VM_CPU_MODEL:
1285 switch (attr->attr) {
1286 case KVM_S390_VM_CPU_PROCESSOR:
1287 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001288 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1289 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001290 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001291 ret = 0;
1292 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001293 /* configuring subfunctions is not supported yet */
1294 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001295 default:
1296 ret = -ENXIO;
1297 break;
1298 }
1299 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001300 case KVM_S390_VM_CRYPTO:
1301 switch (attr->attr) {
1302 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1303 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1304 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1305 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1306 ret = 0;
1307 break;
1308 default:
1309 ret = -ENXIO;
1310 break;
1311 }
1312 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001313 case KVM_S390_VM_MIGRATION:
1314 ret = 0;
1315 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001316 default:
1317 ret = -ENXIO;
1318 break;
1319 }
1320
1321 return ret;
1322}
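
/*
 * Illustrative sketch only (not part of this file): userspace is meant
 * to probe an attribute with KVM_HAS_DEVICE_ATTR before using it, e.g.
 * to enable CMMA ("vm_fd" below is an assumed KVM_CREATE_VM fd):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */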
1323
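/*
 * Illustrative sketch only: a userspace dump tool could read the
 * storage keys (one byte per page) of the first n guest pages like
 * this ("vm_fd", "key_buf" and "n" are assumptions, not defined here):
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = n,		// 1 <= n <= KVM_S390_SKEYS_MAX
 *		.skeydata_addr = (__u64)(unsigned long)key_buf,
 *	};
 *	ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	// ret == KVM_S390_GET_SKEYS_NONE means the guest never enabled keys
 */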
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001324static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1325{
1326 uint8_t *keys;
1327 uint64_t hva;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001328 int i, r = 0;
1329
1330 if (args->flags != 0)
1331 return -EINVAL;
1332
1333 /* Is this guest using storage keys? */
1334 if (!mm_use_skey(current->mm))
1335 return KVM_S390_GET_SKEYS_NONE;
1336
1337 /* Enforce sane limit on memory allocation */
1338 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1339 return -EINVAL;
1340
Michal Hocko752ade62017-05-08 15:57:27 -07001341 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001342 if (!keys)
1343 return -ENOMEM;
1344
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001345 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001346 for (i = 0; i < args->count; i++) {
1347 hva = gfn_to_hva(kvm, args->start_gfn + i);
1348 if (kvm_is_error_hva(hva)) {
1349 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001350 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001351 }
1352
David Hildenbrand154c8c12016-05-09 11:22:34 +02001353 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1354 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001355 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001356 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001357 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001358
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001359 if (!r) {
1360 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1361 sizeof(uint8_t) * args->count);
1362 if (r)
1363 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001364 }
1365
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001366 kvfree(keys);
1367 return r;
1368}
1369
1370static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1371{
1372 uint8_t *keys;
1373 uint64_t hva;
1374 int i, r = 0;
1375
1376 if (args->flags != 0)
1377 return -EINVAL;
1378
1379 /* Enforce sane limit on memory allocation */
1380 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1381 return -EINVAL;
1382
Michal Hocko752ade62017-05-08 15:57:27 -07001383 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001384 if (!keys)
1385 return -ENOMEM;
1386
1387 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1388 sizeof(uint8_t) * args->count);
1389 if (r) {
1390 r = -EFAULT;
1391 goto out;
1392 }
1393
1394 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001395 r = s390_enable_skey();
1396 if (r)
1397 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001398
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001399 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001400 for (i = 0; i < args->count; i++) {
1401 hva = gfn_to_hva(kvm, args->start_gfn + i);
1402 if (kvm_is_error_hva(hva)) {
1403 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001404 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001405 }
1406
1407 /* Lowest order bit is reserved */
1408 if (keys[i] & 0x01) {
1409 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001410 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001411 }
1412
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001413 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001414 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001415 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001416 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001417 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001418out:
1419 kvfree(keys);
1420 return r;
1421}
1422
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001423/*
1424 * Base address and length must be sent at the start of each block, therefore
1425 * it's cheaper to send some clean data, as long as it's less than the size of
1426 * two longs.
1427 */
1428#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
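/*
 * Worked example: with 8-byte pointers the distance above is 16. A
 * clean page costs one byte in the values buffer, while starting a new
 * block costs a fresh start address plus length (two longs, 16 bytes),
 * so a run of up to 16 clean pages is never more expensive to send
 * inline than to skip.
 */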
1429/* for consistency with the KVM_S390_GET_SKEYS limit */
1430#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1431
1432/*
1433 * This function searches for the next page with dirty CMMA attributes, and
1434 * saves the attributes in the buffer up to either the end of the buffer or
1435 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1436 * no trailing clean bytes are saved.
1437 * If no dirty bits are found, or if CMMA is not enabled or was never
1438 * used, a length of zero is reported back.
1439 */
1440static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1441 struct kvm_s390_cmma_log *args)
1442{
1443 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1444 unsigned long bufsize, hva, pgstev, i, next, cur;
1445 int srcu_idx, peek, r = 0, rr;
1446 u8 *res;
1447
1448 cur = args->start_gfn;
1449 i = next = pgstev = 0;
1450
1451 if (unlikely(!kvm->arch.use_cmma))
1452 return -ENXIO;
1453 /* Invalid/unsupported flags were specified */
1454 if (args->flags & ~KVM_S390_CMMA_PEEK)
1455 return -EINVAL;
1456	/* A dirty-bit query (no peek) is only valid while in migration mode */
1457 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1458 if (!peek && !s)
1459 return -EINVAL;
1460 /* CMMA is disabled or was not used, or the buffer has length zero */
1461 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1462 if (!bufsize || !kvm->mm->context.use_cmma) {
1463 memset(args, 0, sizeof(*args));
1464 return 0;
1465 }
1466
1467 if (!peek) {
1468 /* We are not peeking, and there are no dirty pages */
1469 if (!atomic64_read(&s->dirty_pages)) {
1470 memset(args, 0, sizeof(*args));
1471 return 0;
1472 }
1473 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1474 args->start_gfn);
1475 if (cur >= s->bitmap_size) /* nothing found, loop back */
1476 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1477 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1478 memset(args, 0, sizeof(*args));
1479 return 0;
1480 }
1481 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1482 }
1483
1484 res = vmalloc(bufsize);
1485 if (!res)
1486 return -ENOMEM;
1487
1488 args->start_gfn = cur;
1489
1490 down_read(&kvm->mm->mmap_sem);
1491 srcu_idx = srcu_read_lock(&kvm->srcu);
1492 while (i < bufsize) {
1493 hva = gfn_to_hva(kvm, cur);
1494 if (kvm_is_error_hva(hva)) {
1495 r = -EFAULT;
1496 break;
1497 }
1498 /* decrement only if we actually flipped the bit to 0 */
1499 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1500 atomic64_dec(&s->dirty_pages);
1501 r = get_pgste(kvm->mm, hva, &pgstev);
1502 if (r < 0)
1503 pgstev = 0;
1504 /* save the value */
1505 res[i++] = (pgstev >> 24) & 0x3;
1506 /*
1507		 * If the next dirty bit is too far away, stop here.
1508		 * If we have reached the previous "next" bit, look up the following one.
1509 */
1510 if (!peek) {
1511 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1512 break;
1513 if (cur == next)
1514 next = find_next_bit(s->pgste_bitmap,
1515 s->bitmap_size, cur + 1);
1516 /* reached the end of the bitmap or of the buffer, stop */
1517 if ((next >= s->bitmap_size) ||
1518 (next >= args->start_gfn + bufsize))
1519 break;
1520 }
1521 cur++;
1522 }
1523 srcu_read_unlock(&kvm->srcu, srcu_idx);
1524 up_read(&kvm->mm->mmap_sem);
1525 args->count = i;
1526 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1527
1528 rr = copy_to_user((void __user *)args->values, res, args->count);
1529 if (rr)
1530 r = -EFAULT;
1531
1532 vfree(res);
1533 return r;
1534}
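
/*
 * Illustrative sketch only: during migration userspace would drain the
 * dirty CMMA values in a loop until "remaining" drops to zero; this
 * assumes migration mode was enabled via the KVM_S390_VM_MIGRATION
 * attribute, and "vm_fd", "buf", "bufsize" and save_cmma_values() are
 * assumptions, not defined here:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = bufsize,
 *		.flags = 0,		// KVM_S390_CMMA_PEEK would not clear bits
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		save_cmma_values(log.start_gfn, log.count, buf);
 *		log.start_gfn += log.count;
 *		log.count = bufsize;
 *	} while (log.remaining);
 */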
1535
1536/*
1537 * This function sets the CMMA attributes for the given pages. If the input
1538 * buffer has zero length, no action is taken, otherwise the attributes are
1539 * set and the mm->context.use_cmma flag is set.
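 * Only bits covered by _PGSTE_GPS_USAGE_MASK can be modified through
 * the mask argument; all other PGSTE bits stay untouched.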
1540 */
1541static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1542 const struct kvm_s390_cmma_log *args)
1543{
1544 unsigned long hva, mask, pgstev, i;
1545 uint8_t *bits;
1546 int srcu_idx, r = 0;
1547
1548 mask = args->mask;
1549
1550 if (!kvm->arch.use_cmma)
1551 return -ENXIO;
1552 /* invalid/unsupported flags */
1553 if (args->flags != 0)
1554 return -EINVAL;
1555 /* Enforce sane limit on memory allocation */
1556 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1557 return -EINVAL;
1558 /* Nothing to do */
1559 if (args->count == 0)
1560 return 0;
1561
1562 bits = vmalloc(sizeof(*bits) * args->count);
1563 if (!bits)
1564 return -ENOMEM;
1565
1566 r = copy_from_user(bits, (void __user *)args->values, args->count);
1567 if (r) {
1568 r = -EFAULT;
1569 goto out;
1570 }
1571
1572 down_read(&kvm->mm->mmap_sem);
1573 srcu_idx = srcu_read_lock(&kvm->srcu);
1574 for (i = 0; i < args->count; i++) {
1575 hva = gfn_to_hva(kvm, args->start_gfn + i);
1576 if (kvm_is_error_hva(hva)) {
1577 r = -EFAULT;
1578 break;
1579 }
1580
1581 pgstev = bits[i];
1582 pgstev = pgstev << 24;
1583 mask &= _PGSTE_GPS_USAGE_MASK;
1584 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1585 }
1586 srcu_read_unlock(&kvm->srcu, srcu_idx);
1587 up_read(&kvm->mm->mmap_sem);
1588
1589 if (!kvm->mm->context.use_cmma) {
1590 down_write(&kvm->mm->mmap_sem);
1591 kvm->mm->context.use_cmma = 1;
1592 up_write(&kvm->mm->mmap_sem);
1593 }
1594out:
1595 vfree(bits);
1596 return r;
1597}
1598
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001599long kvm_arch_vm_ioctl(struct file *filp,
1600 unsigned int ioctl, unsigned long arg)
1601{
1602 struct kvm *kvm = filp->private_data;
1603 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001604 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001605 int r;
1606
1607 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001608 case KVM_S390_INTERRUPT: {
1609 struct kvm_s390_interrupt s390int;
1610
1611 r = -EFAULT;
1612 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1613 break;
1614 r = kvm_s390_inject_vm(kvm, &s390int);
1615 break;
1616 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001617 case KVM_ENABLE_CAP: {
1618 struct kvm_enable_cap cap;
1619 r = -EFAULT;
1620 if (copy_from_user(&cap, argp, sizeof(cap)))
1621 break;
1622 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1623 break;
1624 }
Cornelia Huck84223592013-07-15 13:36:01 +02001625 case KVM_CREATE_IRQCHIP: {
1626 struct kvm_irq_routing_entry routing;
1627
1628 r = -EINVAL;
1629 if (kvm->arch.use_irqchip) {
1630 /* Set up dummy routing. */
1631 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001632 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001633 }
1634 break;
1635 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001636 case KVM_SET_DEVICE_ATTR: {
1637 r = -EFAULT;
1638 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1639 break;
1640 r = kvm_s390_vm_set_attr(kvm, &attr);
1641 break;
1642 }
1643 case KVM_GET_DEVICE_ATTR: {
1644 r = -EFAULT;
1645 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1646 break;
1647 r = kvm_s390_vm_get_attr(kvm, &attr);
1648 break;
1649 }
1650 case KVM_HAS_DEVICE_ATTR: {
1651 r = -EFAULT;
1652 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1653 break;
1654 r = kvm_s390_vm_has_attr(kvm, &attr);
1655 break;
1656 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001657 case KVM_S390_GET_SKEYS: {
1658 struct kvm_s390_skeys args;
1659
1660 r = -EFAULT;
1661 if (copy_from_user(&args, argp,
1662 sizeof(struct kvm_s390_skeys)))
1663 break;
1664 r = kvm_s390_get_skeys(kvm, &args);
1665 break;
1666 }
1667 case KVM_S390_SET_SKEYS: {
1668 struct kvm_s390_skeys args;
1669
1670 r = -EFAULT;
1671 if (copy_from_user(&args, argp,
1672 sizeof(struct kvm_s390_skeys)))
1673 break;
1674 r = kvm_s390_set_skeys(kvm, &args);
1675 break;
1676 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001677 case KVM_S390_GET_CMMA_BITS: {
1678 struct kvm_s390_cmma_log args;
1679
1680 r = -EFAULT;
1681 if (copy_from_user(&args, argp, sizeof(args)))
1682 break;
1683 r = kvm_s390_get_cmma_bits(kvm, &args);
1684 if (!r) {
1685 r = copy_to_user(argp, &args, sizeof(args));
1686 if (r)
1687 r = -EFAULT;
1688 }
1689 break;
1690 }
1691 case KVM_S390_SET_CMMA_BITS: {
1692 struct kvm_s390_cmma_log args;
1693
1694 r = -EFAULT;
1695 if (copy_from_user(&args, argp, sizeof(args)))
1696 break;
1697 r = kvm_s390_set_cmma_bits(kvm, &args);
1698 break;
1699 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001700 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001701 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001702 }
1703
1704 return r;
1705}
1706
Tony Krowiak45c9b472015-01-13 11:33:26 -05001707static int kvm_s390_query_ap_config(u8 *config)
1708{
1709 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001710 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001711
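	/*
	 * PQAP with function code 0x04 (QCI) takes the function code in
	 * general register 0 and the address of the 128-byte info block
	 * in general register 2; the condition code is extracted from
	 * the PSW via ipm/srl below.
	 */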
Christian Borntraeger86044c82015-02-26 13:53:47 +01001712 memset(config, 0, 128);
Tony Krowiak45c9b472015-01-13 11:33:26 -05001713 asm volatile(
1714 "lgr 0,%1\n"
1715 "lgr 2,%2\n"
1716 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001717 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001718 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001719 "1:\n"
1720 EX_TABLE(0b, 1b)
1721 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001722 : "r" (fcn_code), "r" (config)
1723 : "cc", "0", "2", "memory"
1724 );
1725
1726 return cc;
1727}
1728
1729static int kvm_s390_apxa_installed(void)
1730{
1731 u8 config[128];
1732 int cc;
1733
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001734 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001735 cc = kvm_s390_query_ap_config(config);
1736
1737 if (cc)
1738 pr_err("PQAP(QCI) failed with cc=%d", cc);
1739 else
1740 return config[0] & 0x40;
1741 }
1742
1743 return 0;
1744}
1745
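/*
 * With the AP extended addressing facility (APXA) installed the larger
 * format-2 CRYCB is used, otherwise the VM falls back to the format-1
 * layout.
 */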
1746static void kvm_s390_set_crycb_format(struct kvm *kvm)
1747{
1748 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1749
1750 if (kvm_s390_apxa_installed())
1751 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1752 else
1753 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1754}
1755
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001756static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001757{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001758 struct cpuid cpuid;
1759
1760 get_cpu_id(&cpuid);
1761 cpuid.version = 0xff;
1762 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001763}
1764
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001765static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04001766{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001767 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001768 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001769
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001770 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001771 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001772
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001773 /* Enable AES/DEA protected key functions by default */
1774 kvm->arch.crypto.aes_kw = 1;
1775 kvm->arch.crypto.dea_kw = 1;
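	/*
	 * Each VM gets fresh random wrapping key masks, so protected
	 * keys wrapped under one VM are not usable under another.
	 */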
1776 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1777 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1778 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1779 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04001780}
1781
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001782static void sca_dispose(struct kvm *kvm)
1783{
1784 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001785 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001786 else
1787 free_page((unsigned long)(kvm->arch.sca));
1788 kvm->arch.sca = NULL;
1789}
1790
Carsten Ottee08b9632012-01-04 10:25:20 +01001791int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001792{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001793 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001794 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001795 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001796 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001797
Carsten Ottee08b9632012-01-04 10:25:20 +01001798 rc = -EINVAL;
1799#ifdef CONFIG_KVM_S390_UCONTROL
1800 if (type & ~KVM_VM_S390_UCONTROL)
1801 goto out_err;
1802 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1803 goto out_err;
1804#else
1805 if (type)
1806 goto out_err;
1807#endif
1808
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001809 rc = s390_enable_sie();
1810 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001811 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001812
Carsten Otteb2904112011-10-18 12:27:13 +02001813 rc = -ENOMEM;
1814
Janosch Frank7d0a5e62016-05-10 15:03:42 +02001815 ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
1816
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001817 kvm->arch.use_esca = 0; /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001818 if (!sclp.has_64bscao)
1819 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001820 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001821 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001822 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001823 goto out_err;
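	/*
	 * Stagger the basic SCAs of consecutively created VMs within
	 * their page in 16-byte steps (wrapping before the block would
	 * cross the page boundary), so that they do not all start on
	 * the same cache lines.
	 */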
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001824 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001825 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001826 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001827 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001828 kvm->arch.sca = (struct bsca_block *)
1829 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001830 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001831
1832 sprintf(debug_name, "kvm-%u", current->pid);
1833
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02001834 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001835 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001836 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001837
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001838 kvm->arch.sie_page2 =
1839 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1840 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001841 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001842
Michael Muellerfb5bf932015-02-27 14:25:10 +01001843 /* Populate the facility mask initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001844 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001845 sizeof(S390_lowcore.stfle_fac_list));
Michael Mueller9d8d5782015-02-02 15:42:51 +01001846 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1847 if (i < kvm_s390_fac_list_mask_size())
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001848 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001849 else
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001850 kvm->arch.model.fac_mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001851 }
1852
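	/*
	 * fac_mask is the upper bound of what host and KVM can offer;
	 * the fac_list populated below starts out as a copy and is what
	 * the guest actually sees, adjustable by userspace (within the
	 * mask) via the CPU model attributes.
	 */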
Michael Mueller981467c2015-02-24 13:51:04 +01001853 /* Populate the facility list initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001854 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1855 memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001856 S390_ARCH_FAC_LIST_SIZE_BYTE);
1857
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001858 set_kvm_facility(kvm->arch.model.fac_mask, 74);
1859 set_kvm_facility(kvm->arch.model.fac_list, 74);
1860
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001861 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001862 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001863
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001864 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001865
Fei Li51978392017-02-17 17:06:26 +08001866 mutex_init(&kvm->arch.float_int.ais_lock);
1867 kvm->arch.float_int.simm = 0;
1868 kvm->arch.float_int.nimm = 0;
1869 kvm->arch.float_int.ais_enabled = 0;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001870 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001871 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1872 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001873 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001874 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001875
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001876 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001877 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001878
Carsten Ottee08b9632012-01-04 10:25:20 +01001879 if (type & KVM_VM_S390_UCONTROL) {
1880 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01001881 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01001882 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001883 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001884 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001885 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001886 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001887 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001888 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001889 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001890 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001891 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001892 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001893 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001894
1895 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001896 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001897 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001898
David Hildenbrand8ad35752014-03-14 11:00:21 +01001899 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001900 kvm_s390_vsie_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001901 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001902
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001903 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001904out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001905 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01001906 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001907 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001908 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001909 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001910}
1911
Luiz Capitulino235539b2016-09-07 14:47:23 -04001912bool kvm_arch_has_vcpu_debugfs(void)
1913{
1914 return false;
1915}
1916
1917int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
1918{
1919 return 0;
1920}
1921
Christian Borntraegerd329c032008-11-26 14:50:27 +01001922void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1923{
1924 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001925 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001926 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001927 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001928 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001929 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01001930
1931 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001932 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01001933
Dominik Dingele6db1d62015-05-07 15:41:57 +02001934 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001935 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001936 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001937
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001938 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001939 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001940}
1941
1942static void kvm_free_vcpus(struct kvm *kvm)
1943{
1944 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001945 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001946
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001947 kvm_for_each_vcpu(i, vcpu, kvm)
1948 kvm_arch_vcpu_destroy(vcpu);
1949
1950 mutex_lock(&kvm->lock);
1951 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1952 kvm->vcpus[i] = NULL;
1953
1954 atomic_set(&kvm->online_vcpus, 0);
1955 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001956}
1957
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001958void kvm_arch_destroy_vm(struct kvm *kvm)
1959{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001960 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001961 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001962 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001963 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01001964 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001965 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001966 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001967 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001968 kvm_s390_vsie_destroy(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001969 if (kvm->arch.migration_state) {
1970 vfree(kvm->arch.migration_state->pgste_bitmap);
1971 kfree(kvm->arch.migration_state);
1972 }
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001973 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001974}
1975
1976/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001977static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1978{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001979 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01001980 if (!vcpu->arch.gmap)
1981 return -ENOMEM;
1982 vcpu->arch.gmap->private = vcpu->kvm;
1983
1984 return 0;
1985}
1986
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001987static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1988{
David Hildenbranda6940672016-08-08 22:39:32 +02001989 if (!kvm_s390_use_sca_entries())
1990 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001991 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001992 if (vcpu->kvm->arch.use_esca) {
1993 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001994
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001995 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001996 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001997 } else {
1998 struct bsca_block *sca = vcpu->kvm->arch.sca;
1999
2000 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002001 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002002 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002003 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002004}
2005
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002006static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002007{
David Hildenbranda6940672016-08-08 22:39:32 +02002008 if (!kvm_s390_use_sca_entries()) {
2009 struct bsca_block *sca = vcpu->kvm->arch.sca;
2010
2011 /* we still need the basic sca for the ipte control */
2012 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2013 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2014 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002015 read_lock(&vcpu->kvm->arch.sca_lock);
2016 if (vcpu->kvm->arch.use_esca) {
2017 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002018
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002019 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002020 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2021 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002022 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002023 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002024 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002025 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002026
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002027 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002028 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2029 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002030 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002031 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002032 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002033}
2034
2035/* Basic SCA to Extended SCA data copy routines */
2036static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2037{
2038 d->sda = s->sda;
2039 d->sigp_ctrl.c = s->sigp_ctrl.c;
2040 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2041}
2042
2043static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2044{
2045 int i;
2046
2047 d->ipte_control = s->ipte_control;
2048 d->mcn[0] = s->mcn;
2049 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2050 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2051}
2052
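/*
 * Replace the basic SCA by an extended one: all VCPUs are blocked and
 * the sca_lock is held for writing while the entries are copied, then
 * every SIE block is repointed to the new origin and switched to ESCA
 * mode before the old page is freed.
 */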
2053static int sca_switch_to_extended(struct kvm *kvm)
2054{
2055 struct bsca_block *old_sca = kvm->arch.sca;
2056 struct esca_block *new_sca;
2057 struct kvm_vcpu *vcpu;
2058 unsigned int vcpu_idx;
2059 u32 scaol, scaoh;
2060
2061 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2062 if (!new_sca)
2063 return -ENOMEM;
2064
2065 scaoh = (u32)((u64)(new_sca) >> 32);
2066 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2067
2068 kvm_s390_vcpu_block_all(kvm);
2069 write_lock(&kvm->arch.sca_lock);
2070
2071 sca_copy_b_to_e(new_sca, old_sca);
2072
2073 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2074 vcpu->arch.sie_block->scaoh = scaoh;
2075 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002076 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002077 }
2078 kvm->arch.sca = new_sca;
2079 kvm->arch.use_esca = 1;
2080
2081 write_unlock(&kvm->arch.sca_lock);
2082 kvm_s390_vcpu_unblock_all(kvm);
2083
2084 free_page((unsigned long)old_sca);
2085
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002086 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2087 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002088 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002089}
2090
2091static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2092{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002093 int rc;
2094
David Hildenbranda6940672016-08-08 22:39:32 +02002095 if (!kvm_s390_use_sca_entries()) {
2096 if (id < KVM_MAX_VCPUS)
2097 return true;
2098 return false;
2099 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002100 if (id < KVM_S390_BSCA_CPU_SLOTS)
2101 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002102 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002103 return false;
2104
2105 mutex_lock(&kvm->lock);
2106 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2107 mutex_unlock(&kvm->lock);
2108
2109 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002110}
2111
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002112int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2113{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002114 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2115 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002116 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2117 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002118 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002119 KVM_SYNC_CRS |
2120 KVM_SYNC_ARCH0 |
2121 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002122 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002123 if (test_kvm_facility(vcpu->kvm, 64))
2124 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002125 if (test_kvm_facility(vcpu->kvm, 133))
2126 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002127 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2128 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2129 */
2130 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002131 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002132 else
2133 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002134
2135 if (kvm_is_ucontrol(vcpu->kvm))
2136 return __kvm_ucontrol_vcpu_init(vcpu);
2137
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002138 return 0;
2139}
2140
David Hildenbranddb0758b2016-02-15 09:42:25 +01002141/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2142static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2143{
2144 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002145 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002146 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002147 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002148}
2149
2150/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2151static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2152{
2153 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002154 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002155 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2156 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002157 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002158}
2159
2160/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2161static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2162{
2163 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2164 vcpu->arch.cputm_enabled = true;
2165 __start_cpu_timer_accounting(vcpu);
2166}
2167
2168/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2169static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2170{
2171 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2172 __stop_cpu_timer_accounting(vcpu);
2173 vcpu->arch.cputm_enabled = false;
2174}
2175
2176static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2177{
2178 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2179 __enable_cpu_timer_accounting(vcpu);
2180 preempt_enable();
2181}
2182
2183static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2184{
2185 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2186 __disable_cpu_timer_accounting(vcpu);
2187 preempt_enable();
2188}
2189
David Hildenbrand4287f242016-02-15 09:40:12 +01002190/* set the cpu timer - may only be called from the VCPU thread itself */
2191void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2192{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002193 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002194 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002195 if (vcpu->arch.cputm_enabled)
2196 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002197 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002198 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002199 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002200}
2201
David Hildenbranddb0758b2016-02-15 09:42:25 +01002202/* update and get the cpu timer - can also be called from other VCPU threads */
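/*
 * Readers retry while the sequence count is odd, i.e. while an update
 * of cputm/cputm_start is in flight, instead of taking a lock; that
 * keeps this path safe for remote VCPU threads.
 */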
David Hildenbrand4287f242016-02-15 09:40:12 +01002203__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2204{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002205 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002206 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002207
2208 if (unlikely(!vcpu->arch.cputm_enabled))
2209 return vcpu->arch.sie_block->cputm;
2210
David Hildenbrand9c23a132016-02-17 21:53:33 +01002211 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2212 do {
2213 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2214 /*
2215 * If the writer would ever execute a read in the critical
2216 * section, e.g. in irq context, we have a deadlock.
2217 */
2218 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2219 value = vcpu->arch.sie_block->cputm;
2220 /* if cputm_start is 0, accounting is being started/stopped */
2221 if (likely(vcpu->arch.cputm_start))
2222 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2223 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2224 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002225 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002226}
2227
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002228void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2229{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002230
David Hildenbrand37d9df92015-03-11 16:47:33 +01002231 gmap_enable(vcpu->arch.enabled_gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002232 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002233 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002234 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002235 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002236}
2237
2238void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2239{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002240 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002241 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002242 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002243 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002244 vcpu->arch.enabled_gmap = gmap_get_enabled();
2245 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002246
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002247}
2248
2249static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2250{
2251	/* this equals an initial cpu reset as in the PoP, but we don't switch to ESA */
2252 vcpu->arch.sie_block->gpsw.mask = 0UL;
2253 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002254 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002255 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002256 vcpu->arch.sie_block->ckc = 0UL;
2257 vcpu->arch.sie_block->todpr = 0;
2258 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2259 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2260 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002261 /* make sure the new fpc will be lazily loaded */
2262 save_fpu_regs();
2263 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002264 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002265 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002266 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2267 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002268 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2269 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002270 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002271}
2272
Dominik Dingel31928aa2014-12-04 15:47:07 +01002273void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002274{
Jason J. Herne72f25022014-11-25 09:46:02 -05002275 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002276 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002277 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02002278 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002279 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002280 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002281 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002282 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002283 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002284 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2285 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002286 /* make vcpu_load load the right gmap on the first trigger */
2287 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002288}
2289
Tony Krowiak5102ee82014-06-27 14:46:01 -04002290static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2291{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002292 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002293 return;
2294
Tony Krowiaka374e892014-09-03 10:13:53 +02002295 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2296
2297 if (vcpu->kvm->arch.crypto.aes_kw)
2298 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2299 if (vcpu->kvm->arch.crypto.dea_kw)
2300 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2301
Tony Krowiak5102ee82014-06-27 14:46:01 -04002302 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2303}
2304
Dominik Dingelb31605c2014-03-25 13:47:11 +01002305void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2306{
2307 free_page(vcpu->arch.sie_block->cbrlo);
2308 vcpu->arch.sie_block->cbrlo = 0;
2309}
2310
2311int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2312{
2313 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2314 if (!vcpu->arch.sie_block->cbrlo)
2315 return -ENOMEM;
2316
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002317 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002318 return 0;
2319}
2320
Michael Mueller91520f12015-02-27 14:32:11 +01002321static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2322{
2323 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2324
Michael Mueller91520f12015-02-27 14:32:11 +01002325 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002326 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002327 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002328}
2329
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002330int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2331{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002332 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002333
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2335 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002336 CPUSTAT_STOPPED);
2337
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002338 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002339 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002340 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002341 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002342
Michael Mueller91520f12015-02-27 14:32:11 +01002343 kvm_s390_vcpu_setup_model(vcpu);
2344
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002345 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2346 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002347 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002348 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002349 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002350 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002351 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002352
David Hildenbrand873b4252016-04-04 15:53:47 +02002353 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002354 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002355 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002356 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2357 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002358 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002359 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002360 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002361 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002362 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002363 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002364 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002365 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002366 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002367 vcpu->arch.sie_block->eca |= ECA_VX;
2368 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002369 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002370 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2371 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002372 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002373
2374 if (sclp.has_kss)
2375 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2376 else
2377 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002378
Dominik Dingele6db1d62015-05-07 15:41:57 +02002379 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002380 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2381 if (rc)
2382 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002383 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01002384 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002385 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002386
Tony Krowiak5102ee82014-06-27 14:46:01 -04002387 kvm_s390_vcpu_crypto_setup(vcpu);
2388
Dominik Dingelb31605c2014-03-25 13:47:11 +01002389 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390}
2391
2392struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2393 unsigned int id)
2394{
Carsten Otte4d475552011-10-18 12:27:12 +02002395 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002396 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002397 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002398
David Hildenbrand42158252015-10-12 12:57:22 +02002399 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002400 goto out;
2401
2402 rc = -ENOMEM;
2403
Michael Muellerb110fea2013-06-12 13:54:54 +02002404 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002405 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002406 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002407
QingFeng Haoda72ca42017-06-07 11:41:19 +02002408 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002409 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2410 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002411 goto out_free_cpu;
2412
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002413 vcpu->arch.sie_block = &sie_page->sie_block;
2414 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2415
David Hildenbrandefed1102015-04-16 12:32:41 +02002416 /* the real guest size will always be smaller than msl */
2417 vcpu->arch.sie_block->mso = 0;
2418 vcpu->arch.sie_block->msl = sclp.hamax;
2419
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002420 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002421 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002422 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002423 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002424 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002425 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002426
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002427 rc = kvm_vcpu_init(vcpu, kvm, id);
2428 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002429 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002430 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002431 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002432 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002433
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002434 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002435out_free_sie_block:
2436 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002437out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002438 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002439out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002440 return ERR_PTR(rc);
2441}
2442
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002443int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2444{
David Hildenbrand9a022062014-08-05 17:40:47 +02002445 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002446}
2447
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002448void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002449{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002450 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002451 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002452}
2453
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002454void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002455{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002456 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002457}
2458
Christian Borntraeger8e236542015-04-09 13:49:04 +02002459static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2460{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002461 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002462 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002463}
2464
2465static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2466{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002467 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002468}
2469
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002470/*
2471 * Kick a guest cpu out of SIE and wait until SIE is not running.
2472 * If the CPU is not currently running in SIE (e.g. it is halted and
2473 * waiting while idle), the function returns immediately. */
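/*
 * CPUSTAT_STOP_INT makes the CPU leave SIE at the next opportunity,
 * and PROG_IN_SIE in prog0c is only set while the VCPU actually runs
 * inside SIE, which bounds the busy wait below.
 */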
2474void exit_sie(struct kvm_vcpu *vcpu)
2475{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002476 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002477 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2478 cpu_relax();
2479}
2480
Christian Borntraeger8e236542015-04-09 13:49:04 +02002481/* Kick a guest cpu out of SIE to process a request synchronously */
2482void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002483{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002484 kvm_make_request(req, vcpu);
2485 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002486}
2487
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002488static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2489 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002490{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002491 struct kvm *kvm = gmap->private;
2492 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002493 unsigned long prefix;
2494 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002495
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002496 if (gmap_is_shadow(gmap))
2497 return;
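	/*
	 * The z/Architecture prefix designates an 8k area below 2 GB,
	 * so addresses at or above 1UL << 31 can never belong to a
	 * prefix page.
	 */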
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002498 if (start >= 1UL << 31)
2499 /* We are only interested in prefix pages */
2500 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002501 kvm_for_each_vcpu(i, vcpu, kvm) {
2502 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002503 prefix = kvm_s390_get_prefix(vcpu);
2504 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2505 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2506 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002507 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002508 }
2509 }
2510}
2511
Christoffer Dallb6d33832012-03-08 16:44:24 -05002512int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2513{
2514 /* kvm common code refers to this, but never calls it */
2515 BUG();
2516 return 0;
2517}
2518
Carsten Otte14eebd92012-05-15 14:15:26 +02002519static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2520 struct kvm_one_reg *reg)
2521{
2522 int r = -EINVAL;
2523
2524 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002525 case KVM_REG_S390_TODPR:
2526 r = put_user(vcpu->arch.sie_block->todpr,
2527 (u32 __user *)reg->addr);
2528 break;
2529 case KVM_REG_S390_EPOCHDIFF:
2530 r = put_user(vcpu->arch.sie_block->epoch,
2531 (u64 __user *)reg->addr);
2532 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002533 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002534 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002535 (u64 __user *)reg->addr);
2536 break;
2537 case KVM_REG_S390_CLOCK_COMP:
2538 r = put_user(vcpu->arch.sie_block->ckc,
2539 (u64 __user *)reg->addr);
2540 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002541 case KVM_REG_S390_PFTOKEN:
2542 r = put_user(vcpu->arch.pfault_token,
2543 (u64 __user *)reg->addr);
2544 break;
2545 case KVM_REG_S390_PFCOMPARE:
2546 r = put_user(vcpu->arch.pfault_compare,
2547 (u64 __user *)reg->addr);
2548 break;
2549 case KVM_REG_S390_PFSELECT:
2550 r = put_user(vcpu->arch.pfault_select,
2551 (u64 __user *)reg->addr);
2552 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002553 case KVM_REG_S390_PP:
2554 r = put_user(vcpu->arch.sie_block->pp,
2555 (u64 __user *)reg->addr);
2556 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002557 case KVM_REG_S390_GBEA:
2558 r = put_user(vcpu->arch.sie_block->gbea,
2559 (u64 __user *)reg->addr);
2560 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002561 default:
2562 break;
2563 }
2564
2565 return r;
2566}
2567
2568static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2569 struct kvm_one_reg *reg)
2570{
2571 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002572 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002573
2574 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002575 case KVM_REG_S390_TODPR:
2576 r = get_user(vcpu->arch.sie_block->todpr,
2577 (u32 __user *)reg->addr);
2578 break;
2579 case KVM_REG_S390_EPOCHDIFF:
2580 r = get_user(vcpu->arch.sie_block->epoch,
2581 (u64 __user *)reg->addr);
2582 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002583 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002584 r = get_user(val, (u64 __user *)reg->addr);
2585 if (!r)
2586 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002587 break;
2588 case KVM_REG_S390_CLOCK_COMP:
2589 r = get_user(vcpu->arch.sie_block->ckc,
2590 (u64 __user *)reg->addr);
2591 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002592 case KVM_REG_S390_PFTOKEN:
2593 r = get_user(vcpu->arch.pfault_token,
2594 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002595 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2596 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002597 break;
2598 case KVM_REG_S390_PFCOMPARE:
2599 r = get_user(vcpu->arch.pfault_compare,
2600 (u64 __user *)reg->addr);
2601 break;
2602 case KVM_REG_S390_PFSELECT:
2603 r = get_user(vcpu->arch.pfault_select,
2604 (u64 __user *)reg->addr);
2605 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002606 case KVM_REG_S390_PP:
2607 r = get_user(vcpu->arch.sie_block->pp,
2608 (u64 __user *)reg->addr);
2609 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002610 case KVM_REG_S390_GBEA:
2611 r = get_user(vcpu->arch.sie_block->gbea,
2612 (u64 __user *)reg->addr);
2613 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002614 default:
2615 break;
2616 }
2617
2618 return r;
2619}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002620
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002621static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2622{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002623 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002624 return 0;
2625}
2626
2627int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2628{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002629 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002630 return 0;
2631}
2632
2633int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2634{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002635 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002636 return 0;
2637}
2638
2639int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2640 struct kvm_sregs *sregs)
2641{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002642 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002643 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002644 return 0;
2645}
2646
2647int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2648 struct kvm_sregs *sregs)
2649{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002650 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002651 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002652 return 0;
2653}
2654
2655int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2656{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002657 if (test_fp_ctl(fpu->fpc))
2658 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002659 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002660 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002661 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2662 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002663 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002664 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002665 return 0;
2666}
2667
2668int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2669{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002670 /* make sure we have the latest values */
2671 save_fpu_regs();
2672 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002673 convert_vx_to_fp((freg_t *) fpu->fprs,
2674 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002675 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002676 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002677 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002678 return 0;
2679}
2680
2681static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2682{
2683 int rc = 0;
2684
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002685 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002686 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002687 else {
2688 vcpu->run->psw_mask = psw.mask;
2689 vcpu->run->psw_addr = psw.addr;
2690 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002691 return rc;
2692}
2693
2694int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2695 struct kvm_translation *tr)
2696{
2697 return -EINVAL; /* not implemented yet */
2698}
2699
David Hildenbrand27291e22014-01-23 12:26:52 +01002700#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2701 KVM_GUESTDBG_USE_HW_BP | \
2702 KVM_GUESTDBG_ENABLE)
2703
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002704int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2705 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002706{
David Hildenbrand27291e22014-01-23 12:26:52 +01002707 int rc = 0;
2708
2709 vcpu->guest_debug = 0;
2710 kvm_s390_clear_bp_data(vcpu);
2711
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002712 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002713 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002714 if (!sclp.has_gpere)
2715 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002716
2717 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2718 vcpu->guest_debug = dbg->control;
2719 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002720 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002721
2722 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2723 rc = kvm_s390_import_bp_data(vcpu, dbg);
2724 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002725 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002726 vcpu->arch.guestdbg.last_bp = 0;
2727 }
2728
2729 if (rc) {
2730 vcpu->guest_debug = 0;
2731 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002732 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002733 }
2734
2735 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002736}
2737
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002738int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2739 struct kvm_mp_state *mp_state)
2740{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002741 /* CHECK_STOP and LOAD are not supported yet */
2742 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2743 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002744}
2745
2746int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2747 struct kvm_mp_state *mp_state)
2748{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002749 int rc = 0;
2750
2751 /* user space knows about this interface - let it control the state */
2752 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2753
2754 switch (mp_state->mp_state) {
2755 case KVM_MP_STATE_STOPPED:
2756 kvm_s390_vcpu_stop(vcpu);
2757 break;
2758 case KVM_MP_STATE_OPERATING:
2759 kvm_s390_vcpu_start(vcpu);
2760 break;
2761 case KVM_MP_STATE_LOAD:
2762 case KVM_MP_STATE_CHECK_STOP:
2763 /* fall through - CHECK_STOP and LOAD are not supported yet */
2764 default:
2765 rc = -ENXIO;
2766 }
2767
2768 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002769}
2770
David Hildenbrand8ad35752014-03-14 11:00:21 +01002771static bool ibs_enabled(struct kvm_vcpu *vcpu)
2772{
2773 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2774}
2775
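/*
 * Process all requests pending for this vcpu. Returns 0 once every
 * request has been handled, or a negative error code.
 */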
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002776static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2777{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002778retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002779 kvm_s390_vcpu_request_handled(vcpu);
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002780 if (!vcpu->requests)
2781 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002782 /*
2783 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002784 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002785 * This ensures that the ipte instruction for this request has
2786 * already finished. We might race against a second unmapper that
2787 * wants to set the blocking bit. Let's just retry the request loop.
2788 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002789 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002790 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002791 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2792 kvm_s390_get_prefix(vcpu),
2793 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002794 if (rc) {
2795 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002796 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002797 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002798 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002799 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002800
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002801 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2802 vcpu->arch.sie_block->ihcpu = 0xffff;
2803 goto retry;
2804 }
2805
David Hildenbrand8ad35752014-03-14 11:00:21 +01002806 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2807 if (!ibs_enabled(vcpu)) {
2808 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002809 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002810 &vcpu->arch.sie_block->cpuflags);
2811 }
2812 goto retry;
2813 }
2814
2815 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2816 if (ibs_enabled(vcpu)) {
2817 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002818 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002819 &vcpu->arch.sie_block->cpuflags);
2820 }
2821 goto retry;
2822 }
2823
David Hildenbrand6502a342016-06-21 14:19:51 +02002824 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2825 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2826 goto retry;
2827 }
2828
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002829 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2830 /*
2831 * Disable CMMA virtualization; we will emulate the ESSA
2832 * instruction manually, in order to provide additional
2833 * functionality needed for live migration.
2834 */
2835 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2836 goto retry;
2837 }
2838
2839 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2840 /*
2841 * Re-enable CMMA virtualization if CMMA is available and
2842 * was used.
2843 */
2844 if ((vcpu->kvm->arch.use_cmma) &&
2845 (vcpu->kvm->mm->context.use_cmma))
2846 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2847 goto retry;
2848 }
2849
David Hildenbrand0759d062014-05-13 16:54:32 +02002850 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002851 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002852
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002853 return 0;
2854}
2855
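/*
 * Set the guest TOD epoch for the whole VM. All vcpus are blocked while
 * the new epoch is propagated, so the guest sees a single consistent
 * clock change.
 */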
David Hildenbrand25ed1672015-05-12 09:49:14 +02002856void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2857{
2858 struct kvm_vcpu *vcpu;
2859 int i;
2860
2861 mutex_lock(&kvm->lock);
2862 preempt_disable();
2863 kvm->arch.epoch = tod - get_tod_clock();
2864 kvm_s390_vcpu_block_all(kvm);
2865 kvm_for_each_vcpu(i, vcpu, kvm)
2866 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2867 kvm_s390_vcpu_unblock_all(kvm);
2868 preempt_enable();
2869 mutex_unlock(&kvm->lock);
2870}
2871
Thomas Huthfa576c52014-05-06 17:20:16 +02002872/**
2873 * kvm_arch_fault_in_page - fault-in guest page if necessary
2874 * @vcpu: The corresponding virtual cpu
2875 * @gpa: Guest physical address
2876 * @writable: Whether the page should be writable or not
2877 *
2878 * Make sure that a guest page has been faulted-in on the host.
2879 *
2880 * Return: Zero on success, negative error code otherwise.
2881 */
2882long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002883{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002884 return gmap_fault(vcpu->arch.gmap, gpa,
2885 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002886}
2887
Dominik Dingel3c038e62013-10-07 17:11:48 +02002888static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2889 unsigned long token)
2890{
2891 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002892 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002893
2894 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002895 irq.u.ext.ext_params2 = token;
2896 irq.type = KVM_S390_INT_PFAULT_INIT;
2897 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002898 } else {
2899 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002900 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002901 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2902 }
2903}
2904
2905void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2906 struct kvm_async_pf *work)
2907{
2908 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2909 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2910}
2911
2912void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2913 struct kvm_async_pf *work)
2914{
2915 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2916 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2917}
2918
2919void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2920 struct kvm_async_pf *work)
2921{
2922 /* s390 will always inject the page directly */
2923}
2924
2925bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2926{
2927 /*
2928 * s390 will always inject the page directly, but we still want
2929 * kvm_check_async_pf_completion() to clean up.
2930 */
2931 return true;
2932}
2933
2934static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2935{
2936 hva_t hva;
2937 struct kvm_arch_async_pf arch;
2938 int rc;
2939
2940 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2941 return 0;
2942 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2943 vcpu->arch.pfault_compare)
2944 return 0;
2945 if (psw_extint_disabled(vcpu))
2946 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002947 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002948 return 0;
2949 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2950 return 0;
2951 if (!vcpu->arch.gmap->pfault_enabled)
2952 return 0;
2953
Heiko Carstens81480cc2014-01-01 16:36:07 +01002954 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2955 hva += current->thread.gmap_addr & ~PAGE_MASK;
2956 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002957 return 0;
2958
2959 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2960 return rc;
2961}
2962
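/*
 * Prepare entry into SIE: finish async pfault housekeeping, deliver
 * pending interrupts, handle vcpu requests and set up guest debugging.
 */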
Thomas Huth3fb4c402013-09-12 10:33:43 +02002963static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002964{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002965 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002966
Dominik Dingel3c038e62013-10-07 17:11:48 +02002967 /*
2968 * On s390, notifications for arriving pages will be delivered directly
2969 * to the guest, but the housekeeping for completed pfaults is
2970 * handled outside the worker.
2971 */
2972 kvm_check_async_pf_completion(vcpu);
2973
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002974 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2975 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002976
2977 if (need_resched())
2978 schedule();
2979
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002980 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002981 s390_handle_mcck();
2982
Jens Freimann79395032014-04-17 10:10:30 +02002983 if (!kvm_is_ucontrol(vcpu->kvm)) {
2984 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2985 if (rc)
2986 return rc;
2987 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002988
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002989 rc = kvm_s390_handle_requests(vcpu);
2990 if (rc)
2991 return rc;
2992
David Hildenbrand27291e22014-01-23 12:26:52 +01002993 if (guestdbg_enabled(vcpu)) {
2994 kvm_s390_backup_guest_per_regs(vcpu);
2995 kvm_s390_patch_guest_per_regs(vcpu);
2996 }
2997
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002998 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02002999 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3000 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3001 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003002
Thomas Huth3fb4c402013-09-12 10:33:43 +02003003 return 0;
3004}
3005
Thomas Huth492d8642015-02-10 16:11:01 +01003006static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3007{
David Hildenbrand56317922016-01-12 17:37:58 +01003008 struct kvm_s390_pgm_info pgm_info = {
3009 .code = PGM_ADDRESSING,
3010 };
3011 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003012 int rc;
3013
3014 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3015 trace_kvm_s390_sie_fault(vcpu);
3016
3017 /*
3018 * We want to inject an addressing exception, which is defined as a
3019 * suppressing or terminating exception. However, since we came here
3020 * by a DAT access exception, the PSW still points to the faulting
3021 * instruction, as DAT exceptions are nullifying. So we've got
3022 * to look up the current opcode to get the length of the instruction
3023 * to be able to forward the PSW.
3024 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003025 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003026 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003027 if (rc < 0) {
3028 return rc;
3029 } else if (rc) {
3030 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3031 * Forward by arbitrary ilc, injection will take care of
3032 * nullification if necessary.
3033 */
3034 pgm_info = vcpu->arch.pgm;
3035 ilen = 4;
3036 }
David Hildenbrand56317922016-01-12 17:37:58 +01003037 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3038 kvm_s390_forward_psw(vcpu, ilen);
3039 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003040}
3041
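/*
 * Post-process a SIE exit: reinject host machine checks, handle
 * intercepts and guest page faults. Returns 0, a negative error code,
 * or -EREMOTE if userspace has to handle the exit.
 */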
Thomas Huth3fb4c402013-09-12 10:33:43 +02003042static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3043{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003044 struct mcck_volatile_info *mcck_info;
3045 struct sie_page *sie_page;
3046
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003047 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3048 vcpu->arch.sie_block->icptcode);
3049 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3050
David Hildenbrand27291e22014-01-23 12:26:52 +01003051 if (guestdbg_enabled(vcpu))
3052 kvm_s390_restore_guest_per_regs(vcpu);
3053
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003054 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3055 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003056
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003057 if (exit_reason == -EINTR) {
3058 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3059 sie_page = container_of(vcpu->arch.sie_block,
3060 struct sie_page, sie_block);
3061 mcck_info = &sie_page->mcck_info;
3062 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3063 return 0;
3064 }
3065
David Hildenbrand71f116b2015-10-19 16:24:28 +02003066 if (vcpu->arch.sie_block->icptcode > 0) {
3067 int rc = kvm_handle_sie_intercept(vcpu);
3068
3069 if (rc != -EOPNOTSUPP)
3070 return rc;
3071 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3072 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3073 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3074 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3075 return -EREMOTE;
3076 } else if (exit_reason != -EFAULT) {
3077 vcpu->stat.exit_null++;
3078 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003079 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3080 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3081 vcpu->run->s390_ucontrol.trans_exc_code =
3082 current->thread.gmap_addr;
3083 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003084 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003085 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003086 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003087 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003088 if (kvm_arch_setup_async_pf(vcpu))
3089 return 0;
3090 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003091 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003092 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003093}
3094
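/*
 * The vcpu run loop: alternate between entering SIE and handling exits
 * until an error, a pending signal or a guest debug event ends the loop.
 */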
3095static int __vcpu_run(struct kvm_vcpu *vcpu)
3096{
3097 int rc, exit_reason;
3098
Thomas Huth800c1062013-09-12 10:33:45 +02003099 /*
3100 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3101 * ning the guest), so that memslots (and other stuff) are protected
3102 */
3103 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3104
Thomas Hutha76ccff2013-09-12 10:33:44 +02003105 do {
3106 rc = vcpu_pre_run(vcpu);
3107 if (rc)
3108 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003109
Thomas Huth800c1062013-09-12 10:33:45 +02003110 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003111 /*
3112 * As PF_VCPU will be used in the fault handler, there should be no
3113 * uaccess between guest_enter and guest_exit.
3114 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003115 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003116 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003117 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003118 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003119 exit_reason = sie64a(vcpu->arch.sie_block,
3120 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003121 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003122 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003123 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003124 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003125 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003126
Thomas Hutha76ccff2013-09-12 10:33:44 +02003127 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003128 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003129
Thomas Huth800c1062013-09-12 10:33:45 +02003130 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003131 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003132}
3133
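/*
 * Load the register state that userspace passed in kvm_run into the vcpu
 * and the hardware registers; the host values are saved first so they
 * can be restored in store_regs().
 */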
David Hildenbrandb028ee32014-07-17 10:47:43 +02003134static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3135{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003136 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003137 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003138
3139 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003140 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003141 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3142 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3143 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3144 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3145 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3146 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003147 /* some control register changes require a tlb flush */
3148 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003149 }
3150 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003151 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003152 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3153 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3154 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3155 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3156 }
3157 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3158 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3159 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3160 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003161 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3162 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003163 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003164 /*
3165 * If userspace sets the riccb (e.g. after migration) to a valid state,
3166 * we should enable RI here instead of doing the lazy enablement.
3167 */
3168 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003169 test_kvm_facility(vcpu->kvm, 64) &&
3170 riccb->valid &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003171 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003172 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003173 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003174 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003175 /*
3176 * If userspace sets the gscb (e.g. after migration) to non-zero,
3177 * we should enable GS here instead of doing the lazy enablement.
3178 */
3179 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3180 test_kvm_facility(vcpu->kvm, 133) &&
3181 gscb->gssm &&
3182 !vcpu->arch.gs_enabled) {
3183 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3184 vcpu->arch.sie_block->ecb |= ECB_GS;
3185 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3186 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003187 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003188 save_access_regs(vcpu->arch.host_acrs);
3189 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003190 /* save host (userspace) fprs/vrs */
3191 save_fpu_regs();
3192 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3193 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3194 if (MACHINE_HAS_VX)
3195 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3196 else
3197 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3198 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3199 if (test_fp_ctl(current->thread.fpu.fpc))
3200 /* User space provided an invalid FPC, let's clear it */
3201 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003202 if (MACHINE_HAS_GS) {
3203 preempt_disable();
3204 __ctl_set_bit(2, 4);
3205 if (current->thread.gs_cb) {
3206 vcpu->arch.host_gscb = current->thread.gs_cb;
3207 save_gs_cb(vcpu->arch.host_gscb);
3208 }
3209 if (vcpu->arch.gs_enabled) {
3210 current->thread.gs_cb = (struct gs_cb *)
3211 &vcpu->run->s.regs.gscb;
3212 restore_gs_cb(current->thread.gs_cb);
3213 }
3214 preempt_enable();
3215 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003216
David Hildenbrandb028ee32014-07-17 10:47:43 +02003217 kvm_run->kvm_dirty_regs = 0;
3218}
3219
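/*
 * Counterpart of sync_regs(): copy the guest register state back into
 * kvm_run and restore the host registers saved on entry.
 */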
3220static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3221{
3222 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3223 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3224 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3225 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003226 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003227 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3228 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3229 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3230 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3231 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3232 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3233 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003234 save_access_regs(vcpu->run->s.regs.acrs);
3235 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003236 /* Save guest register state */
3237 save_fpu_regs();
3238 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3239 /* Restore will be done lazily at return */
3240 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3241 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003242 if (MACHINE_HAS_GS) {
3243 __ctl_set_bit(2, 4);
3244 if (vcpu->arch.gs_enabled)
3245 save_gs_cb(current->thread.gs_cb);
3246 preempt_disable();
3247 current->thread.gs_cb = vcpu->arch.host_gscb;
3248 restore_gs_cb(vcpu->arch.host_gscb);
3249 preempt_enable();
3250 if (!vcpu->arch.host_gscb)
3251 __ctl_clear_bit(2, 4);
3252 vcpu->arch.host_gscb = NULL;
3253 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003254
David Hildenbrandb028ee32014-07-17 10:47:43 +02003255}
3256
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003257int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3258{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003259 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003260 sigset_t sigsaved;
3261
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003262 if (kvm_run->immediate_exit)
3263 return -EINTR;
3264
David Hildenbrand27291e22014-01-23 12:26:52 +01003265 if (guestdbg_exit_pending(vcpu)) {
3266 kvm_s390_prepare_debug_exit(vcpu);
3267 return 0;
3268 }
3269
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003270 if (vcpu->sigset_active)
3271 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3272
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003273 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3274 kvm_s390_vcpu_start(vcpu);
3275 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003276 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003277 vcpu->vcpu_id);
3278 return -EINVAL;
3279 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003280
David Hildenbrandb028ee32014-07-17 10:47:43 +02003281 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003282 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003283
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003284 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003285 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003286
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003287 if (signal_pending(current) && !rc) {
3288 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003289 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003290 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003291
David Hildenbrand27291e22014-01-23 12:26:52 +01003292 if (guestdbg_exit_pending(vcpu) && !rc) {
3293 kvm_s390_prepare_debug_exit(vcpu);
3294 rc = 0;
3295 }
3296
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003297 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003298 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003299 rc = 0;
3300 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003301
David Hildenbranddb0758b2016-02-15 09:42:25 +01003302 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003303 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003304
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003305 if (vcpu->sigset_active)
3306 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3307
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003308 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003309 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003310}
3311
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003312/*
3313 * store status at address
3314 * we have two special cases:
3315 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3316 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3317 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003318int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003319{
Carsten Otte092670c2011-07-24 10:48:22 +02003320 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003321 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003322 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003323 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003324 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003325
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003326 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003327 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3328 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003329 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003330 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003331 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3332 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003333 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003334 gpa = px;
3335 } else
3336 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003337
3338 /* manually convert vector registers if necessary */
3339 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003340 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003341 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3342 fprs, 128);
3343 } else {
3344 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003345 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003346 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003347 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003348 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003349 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003350 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003351 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003352 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003353 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003354 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003355 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003356 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003357 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003358 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003359 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003360 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003361 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003362 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003363 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003364 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003365 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003366 &vcpu->arch.sie_block->gcr, 128);
3367 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003368}
3369
Thomas Huthe8798922013-11-06 15:46:33 +01003370int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3371{
3372 /*
3373 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003374 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003375 * them into the save area
3376 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003377 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003378 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003379 save_access_regs(vcpu->run->s.regs.acrs);
3380
3381 return kvm_s390_store_status_unloaded(vcpu, addr);
3382}
3383
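/*
 * The IBS facility speeds up a vcpu while it is the only one running in
 * the VM; these helpers toggle IBS via synchronous requests.
 */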
David Hildenbrand8ad35752014-03-14 11:00:21 +01003384static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3385{
3386 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003387 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003388}
3389
3390static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3391{
3392 unsigned int i;
3393 struct kvm_vcpu *vcpu;
3394
3395 kvm_for_each_vcpu(i, vcpu, kvm) {
3396 __disable_ibs_on_vcpu(vcpu);
3397 }
3398}
3399
3400static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3401{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003402 if (!sclp.has_ibs)
3403 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003404 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003405 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003406}
3407
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003408void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3409{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003410 int i, online_vcpus, started_vcpus = 0;
3411
3412 if (!is_vcpu_stopped(vcpu))
3413 return;
3414
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003415 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003416 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003417 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003418 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3419
3420 for (i = 0; i < online_vcpus; i++) {
3421 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3422 started_vcpus++;
3423 }
3424
3425 if (started_vcpus == 0) {
3426 /* we're the only active VCPU -> speed it up */
3427 __enable_ibs_on_vcpu(vcpu);
3428 } else if (started_vcpus == 1) {
3429 /*
3430 * As we are starting a second VCPU, we have to disable
3431 * the IBS facility on all VCPUs to remove potentially
3432 * outstanding ENABLE requests.
3433 */
3434 __disable_ibs_on_all_vcpus(vcpu->kvm);
3435 }
3436
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003437 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003438 /*
3439 * Another VCPU might have used IBS while we were offline.
3440 * Let's play it safe and flush the VCPU at startup.
3441 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003442 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003443 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003444 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003445}
3446
3447void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3448{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003449 int i, online_vcpus, started_vcpus = 0;
3450 struct kvm_vcpu *started_vcpu = NULL;
3451
3452 if (is_vcpu_stopped(vcpu))
3453 return;
3454
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003455 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003456 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003457 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003458 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3459
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003460 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003461 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003462
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003463 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003464 __disable_ibs_on_vcpu(vcpu);
3465
3466 for (i = 0; i < online_vcpus; i++) {
3467 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3468 started_vcpus++;
3469 started_vcpu = vcpu->kvm->vcpus[i];
3470 }
3471 }
3472
3473 if (started_vcpus == 1) {
3474 /*
3475 * As we only have one VCPU left, we want to enable the
3476 * IBS facility for that VCPU to speed it up.
3477 */
3478 __enable_ibs_on_vcpu(started_vcpu);
3479 }
3480
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003481 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003482 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003483}
3484
Cornelia Huckd6712df2012-12-20 15:32:11 +01003485static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3486 struct kvm_enable_cap *cap)
3487{
3488 int r;
3489
3490 if (cap->flags)
3491 return -EINVAL;
3492
3493 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003494 case KVM_CAP_S390_CSS_SUPPORT:
3495 if (!vcpu->kvm->arch.css_support) {
3496 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003497 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003498 trace_kvm_s390_enable_css(vcpu->kvm);
3499 }
3500 r = 0;
3501 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003502 default:
3503 r = -EINVAL;
3504 break;
3505 }
3506 return r;
3507}
3508
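/*
 * Handle the KVM_S390_MEM_OP ioctl: read or write guest logical memory
 * through a temporary kernel buffer, or only check accessibility when
 * KVM_S390_MEMOP_F_CHECK_ONLY is set.
 */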
Thomas Huth41408c282015-02-06 15:01:21 +01003509static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3510 struct kvm_s390_mem_op *mop)
3511{
3512 void __user *uaddr = (void __user *)mop->buf;
3513 void *tmpbuf = NULL;
3514 int r, srcu_idx;
3515 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3516 | KVM_S390_MEMOP_F_CHECK_ONLY;
3517
3518 if (mop->flags & ~supported_flags)
3519 return -EINVAL;
3520
3521 if (mop->size > MEM_OP_MAX_SIZE)
3522 return -E2BIG;
3523
3524 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3525 tmpbuf = vmalloc(mop->size);
3526 if (!tmpbuf)
3527 return -ENOMEM;
3528 }
3529
3530 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3531
3532 switch (mop->op) {
3533 case KVM_S390_MEMOP_LOGICAL_READ:
3534 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003535 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3536 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003537 break;
3538 }
3539 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3540 if (r == 0) {
3541 if (copy_to_user(uaddr, tmpbuf, mop->size))
3542 r = -EFAULT;
3543 }
3544 break;
3545 case KVM_S390_MEMOP_LOGICAL_WRITE:
3546 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003547 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3548 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003549 break;
3550 }
3551 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3552 r = -EFAULT;
3553 break;
3554 }
3555 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3556 break;
3557 default:
3558 r = -EINVAL;
3559 }
3560
3561 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3562
3563 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3564 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3565
3566 vfree(tmpbuf);
3567 return r;
3568}
3569
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003570long kvm_arch_vcpu_ioctl(struct file *filp,
3571 unsigned int ioctl, unsigned long arg)
3572{
3573 struct kvm_vcpu *vcpu = filp->private_data;
3574 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003575 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003576 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003577
Avi Kivity937366242010-05-13 12:35:17 +03003578 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003579 case KVM_S390_IRQ: {
3580 struct kvm_s390_irq s390irq;
3581
3582 r = -EFAULT;
3583 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3584 break;
3585 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3586 break;
3587 }
Avi Kivity937366242010-05-13 12:35:17 +03003588 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003589 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003590 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003591
Avi Kivity937366242010-05-13 12:35:17 +03003592 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003593 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity937366242010-05-13 12:35:17 +03003594 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003595 if (s390int_to_s390irq(&s390int, &s390irq))
3596 return -EINVAL;
3597 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity937366242010-05-13 12:35:17 +03003598 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003599 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003600 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003601 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003602 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003603 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003604 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003605 case KVM_S390_SET_INITIAL_PSW: {
3606 psw_t psw;
3607
Avi Kivitybc923cc2010-05-13 12:21:46 +03003608 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003609 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003610 break;
3611 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3612 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003613 }
3614 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003615 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3616 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003617 case KVM_SET_ONE_REG:
3618 case KVM_GET_ONE_REG: {
3619 struct kvm_one_reg reg;
3620 r = -EFAULT;
3621 if (copy_from_user(&reg, argp, sizeof(reg)))
3622 break;
3623 if (ioctl == KVM_SET_ONE_REG)
3624 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3625 else
3626 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3627 break;
3628 }
Carsten Otte27e03932012-01-04 10:25:21 +01003629#ifdef CONFIG_KVM_S390_UCONTROL
3630 case KVM_S390_UCAS_MAP: {
3631 struct kvm_s390_ucas_mapping ucasmap;
3632
3633 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3634 r = -EFAULT;
3635 break;
3636 }
3637
3638 if (!kvm_is_ucontrol(vcpu->kvm)) {
3639 r = -EINVAL;
3640 break;
3641 }
3642
3643 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3644 ucasmap.vcpu_addr, ucasmap.length);
3645 break;
3646 }
3647 case KVM_S390_UCAS_UNMAP: {
3648 struct kvm_s390_ucas_mapping ucasmap;
3649
3650 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3651 r = -EFAULT;
3652 break;
3653 }
3654
3655 if (!kvm_is_ucontrol(vcpu->kvm)) {
3656 r = -EINVAL;
3657 break;
3658 }
3659
3660 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3661 ucasmap.length);
3662 break;
3663 }
3664#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003665 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003666 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003667 break;
3668 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003669 case KVM_ENABLE_CAP:
3670 {
3671 struct kvm_enable_cap cap;
3672 r = -EFAULT;
3673 if (copy_from_user(&cap, argp, sizeof(cap)))
3674 break;
3675 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3676 break;
3677 }
Thomas Huth41408c282015-02-06 15:01:21 +01003678 case KVM_S390_MEM_OP: {
3679 struct kvm_s390_mem_op mem_op;
3680
3681 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3682 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3683 else
3684 r = -EFAULT;
3685 break;
3686 }
Jens Freimann816c7662014-11-24 17:13:46 +01003687 case KVM_S390_SET_IRQ_STATE: {
3688 struct kvm_s390_irq_state irq_state;
3689
3690 r = -EFAULT;
3691 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3692 break;
3693 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3694 irq_state.len == 0 ||
3695 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3696 r = -EINVAL;
3697 break;
3698 }
3699 r = kvm_s390_set_irq_state(vcpu,
3700 (void __user *) irq_state.buf,
3701 irq_state.len);
3702 break;
3703 }
3704 case KVM_S390_GET_IRQ_STATE: {
3705 struct kvm_s390_irq_state irq_state;
3706
3707 r = -EFAULT;
3708 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3709 break;
3710 if (irq_state.len == 0) {
3711 r = -EINVAL;
3712 break;
3713 }
3714 r = kvm_s390_get_irq_state(vcpu,
3715 (__u8 __user *) irq_state.buf,
3716 irq_state.len);
3717 break;
3718 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003719 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003720 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003721 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003722 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003723}
3724
Carsten Otte5b1c1492012-01-04 10:25:23 +01003725int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3726{
3727#ifdef CONFIG_KVM_S390_UCONTROL
3728 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3729 && (kvm_is_ucontrol(vcpu->kvm))) {
3730 vmf->page = virt_to_page(vcpu->arch.sie_block);
3731 get_page(vmf->page);
3732 return 0;
3733 }
3734#endif
3735 return VM_FAULT_SIGBUS;
3736}
3737
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303738int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3739 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003740{
3741 return 0;
3742}
3743
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003744/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003745int kvm_arch_prepare_memory_region(struct kvm *kvm,
3746 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003747 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003748 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003749{
Nick Wangdd2887e2013-03-25 17:22:57 +01003750 /* A few sanity checks. Memory slots have to start and end at a
3751 segment boundary (1MB). The memory in userland may be fragmented
3752 into various different vmas. It is okay to mmap() and munmap()
3753 ranges in this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003754
Carsten Otte598841c2011-07-24 10:48:21 +02003755 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003756 return -EINVAL;
3757
Carsten Otte598841c2011-07-24 10:48:21 +02003758 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003759 return -EINVAL;
3760
Dominik Dingela3a92c32014-12-01 17:24:42 +01003761 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3762 return -EINVAL;
3763
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003764 return 0;
3765}
3766
3767void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003768 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003769 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003770 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003771 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003772{
Carsten Ottef7850c92011-07-24 10:48:23 +02003773 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003774
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003775 /* If the basics of the memslot do not change, we do not want
3776 * to update the gmap. Every update causes several unnecessary
3777 * segment translation exceptions. This is usually handled just
3778 * fine by the normal fault handler + gmap, but it will also
3779 * cause faults on the prefix page of running guest CPUs.
3780 */
3781 if (old->userspace_addr == mem->userspace_addr &&
3782 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3783 old->npages * PAGE_SIZE == mem->memory_size)
3784 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003785
3786 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3787 mem->guest_phys_addr, mem->memory_size);
3788 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003789 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003790 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003791}
3792
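/*
 * Extract the two hmfai bits that SCLP reports for facility-list word i
 * and turn them into a mask of facility bits usable without hypervisor
 * assistance.
 */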
Alexander Yarygin60a37702016-04-01 15:38:57 +03003793static inline unsigned long nonhyp_mask(int i)
3794{
3795 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3796
3797 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3798}
3799
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003800void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3801{
3802 vcpu->valid_wakeup = false;
3803}
3804
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003805static int __init kvm_s390_init(void)
3806{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003807 int i;
3808
David Hildenbrand07197fd2015-01-30 16:01:38 +01003809 if (!sclp.has_sief2) {
3810 pr_info("SIE not available\n");
3811 return -ENODEV;
3812 }
3813
Alexander Yarygin60a37702016-04-01 15:38:57 +03003814 for (i = 0; i < 16; i++)
3815 kvm_s390_fac_list_mask[i] |=
3816 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3817
Michael Mueller9d8d5782015-02-02 15:42:51 +01003818 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003819}
3820
3821static void __exit kvm_s390_exit(void)
3822{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003823 kvm_exit();
3824}
3825
3826module_init(kvm_s390_init);
3827module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003828
3829/*
3830 * Enable autoloading of the kvm module.
3831 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3832 * since x86 takes a different approach.
3833 */
3834#include <linux/miscdevice.h>
3835MODULE_ALIAS_MISCDEV(KVM_MINOR);
3836MODULE_ALIAS("devname:kvm");