/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
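
/*
 * Layout sketch, for orientation: each of the 16 u64 words above holds 64
 * facility bits, so the mask can describe facilities 0..1023. FACILITIES_KVM
 * is a build-time generated initializer derived from the architecture's
 * facility definitions.
 */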

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}
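
/*
 * Worked example for the adjustment above: the guest-visible TOD is the
 * host TOD plus the epoch. If STP steers the host clock forward by delta,
 * then epoch -= delta yields (host TOD + delta) + (epoch - delta), i.e.
 * the guest-visible TOD stays unchanged.
 */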

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
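
/*
 * Usage sketch: PERFORM LOCKED OPERATION treats a function code with the
 * 0x100 bit set as a "test bit" query, so plo_test_bit(nr) only asks
 * whether function code nr is installed (cc == 0) without executing it.
 * The probing loop in kvm_s390_cpu_feat_init() below walks all 256 codes
 * this way.
 */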

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved even though they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
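
/*
 * Userspace sketch (hypothetical fd name): the feature bits collected above
 * are read back through the vm device attribute interface, e.g.
 *
 *	struct kvm_s390_vm_cpu_feat feat;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE_FEAT,
 *		.addr  = (__u64)&feat,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */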

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}
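
/*
 * Userspace sketch (hypothetical fd name): these values surface through
 * KVM_CHECK_EXTENSION on the VM (or /dev/kvm) fd, e.g.
 *
 *	int max = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *
 * which returns MEM_OP_MAX_SIZE (65536) when the memory op interface is
 * available.
 */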

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
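
/*
 * Userspace sketch (hypothetical fd/slot/buffer names): the log is
 * harvested per memslot, one bit per guest page, and cleared here once it
 * has been copied out:
 *
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */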

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			kvm->arch.float_int.ais_enabled = 1;
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
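
/*
 * Userspace sketch (hypothetical fd name): VM capabilities are switched on
 * with KVM_ENABLE_CAP on the VM fd, for most of the cases above before any
 * vCPU has been created, e.g.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_VECTOR_REGISTERS };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */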

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock held to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}
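
/*
 * Sizing example for the bitmap above (assuming 4 KiB pages): a guest
 * whose last slot ends at 1 GiB spans 262144 page frames, which is already
 * a multiple of BITS_PER_LONG, so vmalloc(262144 / 8) allocates a 32 KiB
 * bitmap.
 */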

/*
 * Must be called with kvm->lock held to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
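
/*
 * Userspace sketch (hypothetical fd name and value): both TOD halves are
 * driven through KVM_SET_DEVICE_ATTR in the KVM_S390_VM_TOD group, e.g.
 *
 *	__u64 gtod = 0x123456789abcdef0ULL;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */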

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
1323
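/*
 * Illustrative save/restore of guest storage keys (a sketch, not part of
 * this file; "vm_fd", "buf" and PAGES are placeholders, errors unchecked):
 *
 *	uint8_t buf[PAGES];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = PAGES,
 *		.skeydata_addr = (uint64_t)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);	(save)
 *	ioctl(vm_fd, KVM_S390_SET_SKEYS, &args);	(restore)
 *
 * A return value of KVM_S390_GET_SKEYS_NONE signals that the guest never
 * enabled storage keys, so there is nothing to save.
 */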
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001324static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1325{
1326 uint8_t *keys;
1327 uint64_t hva;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001328 int i, r = 0;
1329
1330 if (args->flags != 0)
1331 return -EINVAL;
1332
1333 /* Is this guest using storage keys? */
1334 if (!mm_use_skey(current->mm))
1335 return KVM_S390_GET_SKEYS_NONE;
1336
1337 /* Enforce sane limit on memory allocation */
1338 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1339 return -EINVAL;
1340
Michal Hocko752ade62017-05-08 15:57:27 -07001341 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001342 if (!keys)
1343 return -ENOMEM;
1344
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001345 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001346 for (i = 0; i < args->count; i++) {
1347 hva = gfn_to_hva(kvm, args->start_gfn + i);
1348 if (kvm_is_error_hva(hva)) {
1349 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001350 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001351 }
1352
David Hildenbrand154c8c12016-05-09 11:22:34 +02001353 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1354 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001355 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001356 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001357 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001358
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001359 if (!r) {
1360 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1361 sizeof(uint8_t) * args->count);
1362 if (r)
1363 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001364 }
1365
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001366 kvfree(keys);
1367 return r;
1368}
1369
1370static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1371{
1372 uint8_t *keys;
1373 uint64_t hva;
1374 int i, r = 0;
1375
1376 if (args->flags != 0)
1377 return -EINVAL;
1378
1379 /* Enforce sane limit on memory allocation */
1380 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1381 return -EINVAL;
1382
Michal Hocko752ade62017-05-08 15:57:27 -07001383 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001384 if (!keys)
1385 return -ENOMEM;
1386
1387 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1388 sizeof(uint8_t) * args->count);
1389 if (r) {
1390 r = -EFAULT;
1391 goto out;
1392 }
1393
1394 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001395 r = s390_enable_skey();
1396 if (r)
1397 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001398
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001399 down_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001400 for (i = 0; i < args->count; i++) {
1401 hva = gfn_to_hva(kvm, args->start_gfn + i);
1402 if (kvm_is_error_hva(hva)) {
1403 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001404 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001405 }
1406
1407 /* Lowest order bit is reserved */
1408 if (keys[i] & 0x01) {
1409 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001410 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001411 }
1412
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001413 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001414 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001415 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001416 }
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001417 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001418out:
1419 kvfree(keys);
1420 return r;
1421}
1422
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001423/*
 1424 * Base address and length must be sent at the start of each block, so it
 1425 * is cheaper to also send some clean data, as long as a clean run is
 1426 * shorter than two longs (i.e. 16 bytes with 8-byte longs).
 1427 */
 1428#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
 1429/* same limit as for storage keys, for consistency */
 1430#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1431
1432/*
1433 * This function searches for the next page with dirty CMMA attributes, and
1434 * saves the attributes in the buffer up to either the end of the buffer or
1435 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1436 * no trailing clean bytes are saved.
1437 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1438 * output buffer will indicate 0 as length.
1439 */
1440static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1441 struct kvm_s390_cmma_log *args)
1442{
1443 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1444 unsigned long bufsize, hva, pgstev, i, next, cur;
1445 int srcu_idx, peek, r = 0, rr;
1446 u8 *res;
1447
1448 cur = args->start_gfn;
1449 i = next = pgstev = 0;
1450
1451 if (unlikely(!kvm->arch.use_cmma))
1452 return -ENXIO;
1453 /* Invalid/unsupported flags were specified */
1454 if (args->flags & ~KVM_S390_CMMA_PEEK)
1455 return -EINVAL;
 1456 /* Without the peek flag, dirty data can only be read during migration */
1457 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1458 if (!peek && !s)
1459 return -EINVAL;
1460 /* CMMA is disabled or was not used, or the buffer has length zero */
1461 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1462 if (!bufsize || !kvm->mm->context.use_cmma) {
1463 memset(args, 0, sizeof(*args));
1464 return 0;
1465 }
1466
1467 if (!peek) {
1468 /* We are not peeking, and there are no dirty pages */
1469 if (!atomic64_read(&s->dirty_pages)) {
1470 memset(args, 0, sizeof(*args));
1471 return 0;
1472 }
1473 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1474 args->start_gfn);
1475 if (cur >= s->bitmap_size) /* nothing found, loop back */
1476 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
 1477 if (cur >= s->bitmap_size) { /* still nothing after the wrap (very unlikely) */
1478 memset(args, 0, sizeof(*args));
1479 return 0;
1480 }
1481 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1482 }
1483
1484 res = vmalloc(bufsize);
1485 if (!res)
1486 return -ENOMEM;
1487
1488 args->start_gfn = cur;
1489
1490 down_read(&kvm->mm->mmap_sem);
1491 srcu_idx = srcu_read_lock(&kvm->srcu);
1492 while (i < bufsize) {
1493 hva = gfn_to_hva(kvm, cur);
1494 if (kvm_is_error_hva(hva)) {
1495 r = -EFAULT;
1496 break;
1497 }
1498 /* decrement only if we actually flipped the bit to 0 */
1499 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1500 atomic64_dec(&s->dirty_pages);
1501 r = get_pgste(kvm->mm, hva, &pgstev);
1502 if (r < 0)
1503 pgstev = 0;
1504 /* save the value */
1505 res[i++] = (pgstev >> 24) & 0x3;
1506 /*
1507 * if the next bit is too far away, stop.
1508 * if we reached the previous "next", find the next one
1509 */
1510 if (!peek) {
1511 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1512 break;
1513 if (cur == next)
1514 next = find_next_bit(s->pgste_bitmap,
1515 s->bitmap_size, cur + 1);
1516 /* reached the end of the bitmap or of the buffer, stop */
1517 if ((next >= s->bitmap_size) ||
1518 (next >= args->start_gfn + bufsize))
1519 break;
1520 }
1521 cur++;
1522 }
1523 srcu_read_unlock(&kvm->srcu, srcu_idx);
1524 up_read(&kvm->mm->mmap_sem);
1525 args->count = i;
1526 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1527
1528 rr = copy_to_user((void __user *)args->values, res, args->count);
1529 if (rr)
1530 r = -EFAULT;
1531
1532 vfree(res);
1533 return r;
1534}
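
/*
 * Illustrative migration-side loop for the ioctl above and its counterpart
 * below (a sketch, not part of this file; "vm_fd", "buf" and send() are
 * placeholders):
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = KVM_S390_CMMA_SIZE_MAX,
 *		.values = (uint64_t)buf,
 *	};
 *	do {
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		send(log.start_gfn, log.count, buf);
 *		log.start_gfn += log.count;
 *		log.count = KVM_S390_CMMA_SIZE_MAX;
 *	} while (log.remaining);
 *
 * The destination would then replay every received block through
 * KVM_S390_SET_CMMA_BITS.
 */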
1535
1536/*
1537 * This function sets the CMMA attributes for the given pages. If the input
1538 * buffer has zero length, no action is taken, otherwise the attributes are
1539 * set and the mm->context.use_cmma flag is set.
1540 */
1541static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1542 const struct kvm_s390_cmma_log *args)
1543{
1544 unsigned long hva, mask, pgstev, i;
1545 uint8_t *bits;
1546 int srcu_idx, r = 0;
1547
1548 mask = args->mask;
1549
1550 if (!kvm->arch.use_cmma)
1551 return -ENXIO;
1552 /* invalid/unsupported flags */
1553 if (args->flags != 0)
1554 return -EINVAL;
1555 /* Enforce sane limit on memory allocation */
1556 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1557 return -EINVAL;
1558 /* Nothing to do */
1559 if (args->count == 0)
1560 return 0;
1561
1562 bits = vmalloc(sizeof(*bits) * args->count);
1563 if (!bits)
1564 return -ENOMEM;
1565
1566 r = copy_from_user(bits, (void __user *)args->values, args->count);
1567 if (r) {
1568 r = -EFAULT;
1569 goto out;
1570 }
1571
1572 down_read(&kvm->mm->mmap_sem);
1573 srcu_idx = srcu_read_lock(&kvm->srcu);
1574 for (i = 0; i < args->count; i++) {
1575 hva = gfn_to_hva(kvm, args->start_gfn + i);
1576 if (kvm_is_error_hva(hva)) {
1577 r = -EFAULT;
1578 break;
1579 }
1580
1581 pgstev = bits[i];
1582 pgstev = pgstev << 24;
1583 mask &= _PGSTE_GPS_USAGE_MASK;
1584 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1585 }
1586 srcu_read_unlock(&kvm->srcu, srcu_idx);
1587 up_read(&kvm->mm->mmap_sem);
1588
1589 if (!kvm->mm->context.use_cmma) {
1590 down_write(&kvm->mm->mmap_sem);
1591 kvm->mm->context.use_cmma = 1;
1592 up_write(&kvm->mm->mmap_sem);
1593 }
1594out:
1595 vfree(bits);
1596 return r;
1597}
1598
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001599long kvm_arch_vm_ioctl(struct file *filp,
1600 unsigned int ioctl, unsigned long arg)
1601{
1602 struct kvm *kvm = filp->private_data;
1603 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02001604 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001605 int r;
1606
1607 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01001608 case KVM_S390_INTERRUPT: {
1609 struct kvm_s390_interrupt s390int;
1610
1611 r = -EFAULT;
1612 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1613 break;
1614 r = kvm_s390_inject_vm(kvm, &s390int);
1615 break;
1616 }
Cornelia Huckd938dc52013-10-23 18:26:34 +02001617 case KVM_ENABLE_CAP: {
1618 struct kvm_enable_cap cap;
1619 r = -EFAULT;
1620 if (copy_from_user(&cap, argp, sizeof(cap)))
1621 break;
1622 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1623 break;
1624 }
Cornelia Huck84223592013-07-15 13:36:01 +02001625 case KVM_CREATE_IRQCHIP: {
1626 struct kvm_irq_routing_entry routing;
1627
1628 r = -EINVAL;
1629 if (kvm->arch.use_irqchip) {
1630 /* Set up dummy routing. */
1631 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04001632 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02001633 }
1634 break;
1635 }
Dominik Dingelf2061652014-04-09 13:13:00 +02001636 case KVM_SET_DEVICE_ATTR: {
1637 r = -EFAULT;
1638 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1639 break;
1640 r = kvm_s390_vm_set_attr(kvm, &attr);
1641 break;
1642 }
1643 case KVM_GET_DEVICE_ATTR: {
1644 r = -EFAULT;
1645 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1646 break;
1647 r = kvm_s390_vm_get_attr(kvm, &attr);
1648 break;
1649 }
1650 case KVM_HAS_DEVICE_ATTR: {
1651 r = -EFAULT;
1652 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1653 break;
1654 r = kvm_s390_vm_has_attr(kvm, &attr);
1655 break;
1656 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001657 case KVM_S390_GET_SKEYS: {
1658 struct kvm_s390_skeys args;
1659
1660 r = -EFAULT;
1661 if (copy_from_user(&args, argp,
1662 sizeof(struct kvm_s390_skeys)))
1663 break;
1664 r = kvm_s390_get_skeys(kvm, &args);
1665 break;
1666 }
1667 case KVM_S390_SET_SKEYS: {
1668 struct kvm_s390_skeys args;
1669
1670 r = -EFAULT;
1671 if (copy_from_user(&args, argp,
1672 sizeof(struct kvm_s390_skeys)))
1673 break;
1674 r = kvm_s390_set_skeys(kvm, &args);
1675 break;
1676 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001677 case KVM_S390_GET_CMMA_BITS: {
1678 struct kvm_s390_cmma_log args;
1679
1680 r = -EFAULT;
1681 if (copy_from_user(&args, argp, sizeof(args)))
1682 break;
1683 r = kvm_s390_get_cmma_bits(kvm, &args);
1684 if (!r) {
1685 r = copy_to_user(argp, &args, sizeof(args));
1686 if (r)
1687 r = -EFAULT;
1688 }
1689 break;
1690 }
1691 case KVM_S390_SET_CMMA_BITS: {
1692 struct kvm_s390_cmma_log args;
1693
1694 r = -EFAULT;
1695 if (copy_from_user(&args, argp, sizeof(args)))
1696 break;
1697 r = kvm_s390_set_cmma_bits(kvm, &args);
1698 break;
1699 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001700 default:
Avi Kivity367e1312009-08-26 14:57:07 +03001701 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001702 }
1703
1704 return r;
1705}
1706
Tony Krowiak45c9b472015-01-13 11:33:26 -05001707static int kvm_s390_query_ap_config(u8 *config)
1708{
1709 u32 fcn_code = 0x04000000UL;
Christian Borntraeger86044c82015-02-26 13:53:47 +01001710 u32 cc = 0;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001711
Christian Borntraeger86044c82015-02-26 13:53:47 +01001712 memset(config, 0, 128);
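 /*
  * Function code 0x04 (QCI) is loaded into GR0 and the address of the
  * 128-byte info block into GR2; ".long 0xb2af0000" encodes the PQAP
  * instruction, and IPM/SRL extract its condition code afterwards.
  */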
Tony Krowiak45c9b472015-01-13 11:33:26 -05001713 asm volatile(
1714 "lgr 0,%1\n"
1715 "lgr 2,%2\n"
1716 ".long 0xb2af0000\n" /* PQAP(QCI) */
Christian Borntraeger86044c82015-02-26 13:53:47 +01001717 "0: ipm %0\n"
Tony Krowiak45c9b472015-01-13 11:33:26 -05001718 "srl %0,28\n"
Christian Borntraeger86044c82015-02-26 13:53:47 +01001719 "1:\n"
1720 EX_TABLE(0b, 1b)
1721 : "+r" (cc)
Tony Krowiak45c9b472015-01-13 11:33:26 -05001722 : "r" (fcn_code), "r" (config)
1723 : "cc", "0", "2", "memory"
1724 );
1725
1726 return cc;
1727}
1728
1729static int kvm_s390_apxa_installed(void)
1730{
1731 u8 config[128];
1732 int cc;
1733
Heiko Carstensa6aacc32015-11-24 14:28:12 +01001734 if (test_facility(12)) {
Tony Krowiak45c9b472015-01-13 11:33:26 -05001735 cc = kvm_s390_query_ap_config(config);
1736
1737 if (cc)
 1738 pr_err("PQAP(QCI) failed with cc=%d\n", cc);
1739 else
1740 return config[0] & 0x40;
1741 }
1742
1743 return 0;
1744}
1745
1746static void kvm_s390_set_crycb_format(struct kvm *kvm)
1747{
1748 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1749
1750 if (kvm_s390_apxa_installed())
1751 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1752 else
1753 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1754}
1755
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001756static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01001757{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001758 struct cpuid cpuid;
1759
1760 get_cpu_id(&cpuid);
1761 cpuid.version = 0xff;
1762 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01001763}
1764
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001765static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04001766{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001767 if (!test_kvm_facility(kvm, 76))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001768 return;
Tony Krowiak5102ee82014-06-27 14:46:01 -04001769
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001770 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05001771 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001772
Tony Krowiaked6f76b2015-02-24 14:06:57 -05001773 /* Enable AES/DEA protected key functions by default */
1774 kvm->arch.crypto.aes_kw = 1;
1775 kvm->arch.crypto.dea_kw = 1;
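 /* seed this VM's wrapping key masks with fresh randomness */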
1776 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1777 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1778 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1779 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04001780}
1781
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001782static void sca_dispose(struct kvm *kvm)
1783{
1784 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001785 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001786 else
1787 free_page((unsigned long)(kvm->arch.sca));
1788 kvm->arch.sca = NULL;
1789}
1790
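/*
 * The "type" argument is passed through from userspace's KVM_CREATE_VM
 * call; an illustrative sketch (not part of this file, "kvm_fd" stands
 * for an open /dev/kvm fd):
 *
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * The latter needs CAP_SYS_ADMIN and CONFIG_KVM_S390_UCONTROL, as checked
 * below.
 */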
Carsten Ottee08b9632012-01-04 10:25:20 +01001791int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001792{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001793 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001794 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001795 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001796 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001797
Carsten Ottee08b9632012-01-04 10:25:20 +01001798 rc = -EINVAL;
1799#ifdef CONFIG_KVM_S390_UCONTROL
1800 if (type & ~KVM_VM_S390_UCONTROL)
1801 goto out_err;
1802 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1803 goto out_err;
1804#else
1805 if (type)
1806 goto out_err;
1807#endif
1808
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001809 rc = s390_enable_sie();
1810 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001811 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001812
Carsten Otteb2904112011-10-18 12:27:13 +02001813 rc = -ENOMEM;
1814
Janosch Frank7d0a5e62016-05-10 15:03:42 +02001815 ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
1816
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001817 kvm->arch.use_esca = 0; /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001818 if (!sclp.has_64bscao)
1819 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001820 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand76a6dd72015-11-24 13:33:49 +01001821 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001822 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001823 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001824 spin_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001825 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001826 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01001827 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001828 kvm->arch.sca = (struct bsca_block *)
1829 ((char *) kvm->arch.sca + sca_offset);
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01001830 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001831
1832 sprintf(debug_name, "kvm-%u", current->pid);
1833
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02001834 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001835 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001836 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001837
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001838 kvm->arch.sie_page2 =
1839 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1840 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001841 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001842
Michael Muellerfb5bf932015-02-27 14:25:10 +01001843 /* Populate the facility mask initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001844 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001845 sizeof(S390_lowcore.stfle_fac_list));
Michael Mueller9d8d5782015-02-02 15:42:51 +01001846 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1847 if (i < kvm_s390_fac_list_mask_size())
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001848 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
Michael Mueller9d8d5782015-02-02 15:42:51 +01001849 else
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001850 kvm->arch.model.fac_mask[i] = 0UL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001851 }
1852
Michael Mueller981467c2015-02-24 13:51:04 +01001853 /* Populate the facility list initially. */
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001854 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1855 memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001856 S390_ARCH_FAC_LIST_SIZE_BYTE);
1857
Janosch Frank95ca2cb2016-05-23 15:11:58 +02001858 set_kvm_facility(kvm->arch.model.fac_mask, 74);
1859 set_kvm_facility(kvm->arch.model.fac_list, 74);
1860
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001861 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001862 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01001863
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001864 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04001865
Fei Li51978392017-02-17 17:06:26 +08001866 mutex_init(&kvm->arch.float_int.ais_lock);
1867 kvm->arch.float_int.simm = 0;
1868 kvm->arch.float_int.nimm = 0;
1869 kvm->arch.float_int.ais_enabled = 0;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001870 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02001871 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1872 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01001873 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02001874 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001875
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001876 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001877 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001878
Carsten Ottee08b9632012-01-04 10:25:20 +01001879 if (type & KVM_VM_S390_UCONTROL) {
1880 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01001881 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01001882 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001883 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001884 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001885 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02001886 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01001887 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001888 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01001889 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01001890 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02001891 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001892 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01001893 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001894
1895 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +02001896 kvm->arch.use_irqchip = 0;
Jason J. Herne72f25022014-11-25 09:46:02 -05001897 kvm->arch.epoch = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001898
David Hildenbrand8ad35752014-03-14 11:00:21 +01001899 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001900 kvm_s390_vsie_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001901 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001902
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001903 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001904out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001905 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01001906 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001907 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02001908 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01001909 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001910}
1911
Luiz Capitulino235539b2016-09-07 14:47:23 -04001912bool kvm_arch_has_vcpu_debugfs(void)
1913{
1914 return false;
1915}
1916
1917int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
1918{
1919 return 0;
1920}
1921
Christian Borntraegerd329c032008-11-26 14:50:27 +01001922void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1923{
1924 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02001925 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001926 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02001927 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02001928 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001929 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01001930
1931 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001932 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01001933
Dominik Dingele6db1d62015-05-07 15:41:57 +02001934 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01001935 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001936 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001937
Christian Borntraeger6692cef2008-11-26 14:51:08 +01001938 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02001939 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001940}
1941
1942static void kvm_free_vcpus(struct kvm *kvm)
1943{
1944 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001945 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001946
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001947 kvm_for_each_vcpu(i, vcpu, kvm)
1948 kvm_arch_vcpu_destroy(vcpu);
1949
1950 mutex_lock(&kvm->lock);
1951 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1952 kvm->vcpus[i] = NULL;
1953
1954 atomic_set(&kvm->online_vcpus, 0);
1955 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001956}
1957
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001958void kvm_arch_destroy_vm(struct kvm *kvm)
1959{
Christian Borntraegerd329c032008-11-26 14:50:27 +01001960 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001961 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001962 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001963 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01001964 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001965 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02001966 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01001967 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02001968 kvm_s390_vsie_destroy(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001969 if (kvm->arch.migration_state) {
1970 vfree(kvm->arch.migration_state->pgste_bitmap);
1971 kfree(kvm->arch.migration_state);
1972 }
Christian Borntraeger8335713a2015-12-08 16:55:27 +01001973 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001974}
1975
1976/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001977static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1978{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01001979 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01001980 if (!vcpu->arch.gmap)
1981 return -ENOMEM;
1982 vcpu->arch.gmap->private = vcpu->kvm;
1983
1984 return 0;
1985}
1986
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001987static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1988{
David Hildenbranda6940672016-08-08 22:39:32 +02001989 if (!kvm_s390_use_sca_entries())
1990 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02001991 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001992 if (vcpu->kvm->arch.use_esca) {
1993 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02001994
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001995 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02001996 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02001997 } else {
1998 struct bsca_block *sca = vcpu->kvm->arch.sca;
1999
2000 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002001 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002002 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002003 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002004}
2005
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002006static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002007{
David Hildenbranda6940672016-08-08 22:39:32 +02002008 if (!kvm_s390_use_sca_entries()) {
2009 struct bsca_block *sca = vcpu->kvm->arch.sca;
2010
2011 /* we still need the basic sca for the ipte control */
2012 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2013 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2014 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002015 read_lock(&vcpu->kvm->arch.sca_lock);
2016 if (vcpu->kvm->arch.use_esca) {
2017 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002018
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002019 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002020 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2021 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002022 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002023 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002024 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002025 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002026
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002027 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002028 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2029 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002030 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43baf2015-04-22 17:09:44 +02002031 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002032 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002033}
2034
2035/* Basic SCA to Extended SCA data copy routines */
2036static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2037{
2038 d->sda = s->sda;
2039 d->sigp_ctrl.c = s->sigp_ctrl.c;
2040 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2041}
2042
2043static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2044{
2045 int i;
2046
2047 d->ipte_control = s->ipte_control;
2048 d->mcn[0] = s->mcn;
2049 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2050 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2051}
2052
2053static int sca_switch_to_extended(struct kvm *kvm)
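/*
 * Switch from the basic to the extended SCA format. All VCPUs are blocked
 * and kicked out of SIE first, the entries are copied while holding the
 * sca_lock writer lock, and every SIE block is repointed to the new SCA
 * before the VCPUs may run again.
 */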
2054{
2055 struct bsca_block *old_sca = kvm->arch.sca;
2056 struct esca_block *new_sca;
2057 struct kvm_vcpu *vcpu;
2058 unsigned int vcpu_idx;
2059 u32 scaol, scaoh;
2060
2061 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2062 if (!new_sca)
2063 return -ENOMEM;
2064
2065 scaoh = (u32)((u64)(new_sca) >> 32);
2066 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2067
2068 kvm_s390_vcpu_block_all(kvm);
2069 write_lock(&kvm->arch.sca_lock);
2070
2071 sca_copy_b_to_e(new_sca, old_sca);
2072
2073 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2074 vcpu->arch.sie_block->scaoh = scaoh;
2075 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002076 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002077 }
2078 kvm->arch.sca = new_sca;
2079 kvm->arch.use_esca = 1;
2080
2081 write_unlock(&kvm->arch.sca_lock);
2082 kvm_s390_vcpu_unblock_all(kvm);
2083
2084 free_page((unsigned long)old_sca);
2085
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002086 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2087 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002088 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002089}
2090
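/*
 * Ids below KVM_S390_BSCA_CPU_SLOTS always fit into the basic SCA; larger
 * ids trigger a one-time lazy switch to the extended SCA, which requires
 * the ESCA and 64-bit-SCAO facilities.
 */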
2091static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2092{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002093 int rc;
2094
David Hildenbranda6940672016-08-08 22:39:32 +02002095 if (!kvm_s390_use_sca_entries()) {
2096 if (id < KVM_MAX_VCPUS)
2097 return true;
2098 return false;
2099 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002100 if (id < KVM_S390_BSCA_CPU_SLOTS)
2101 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002102 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002103 return false;
2104
2105 mutex_lock(&kvm->lock);
2106 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2107 mutex_unlock(&kvm->lock);
2108
2109 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002110}
2111
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002112int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2113{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002114 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2115 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002116 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2117 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002118 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002119 KVM_SYNC_CRS |
2120 KVM_SYNC_ARCH0 |
2121 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002122 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002123 if (test_kvm_facility(vcpu->kvm, 64))
2124 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002125 if (test_kvm_facility(vcpu->kvm, 133))
2126 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002127 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2128 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2129 */
2130 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002131 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002132 else
2133 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002134
2135 if (kvm_is_ucontrol(vcpu->kvm))
2136 return __kvm_ucontrol_vcpu_init(vcpu);
2137
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002138 return 0;
2139}
2140
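/*
 * CPU timer accounting: the VCPU thread is the only writer of cputm_start
 * and cputm_enabled, but kvm_s390_get_cpu_timer() may be called from other
 * threads; cputm_seqcount lets those readers retry instead of taking a
 * lock on this hot path.
 */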
David Hildenbranddb0758b2016-02-15 09:42:25 +01002141/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2142static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2143{
2144 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002145 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002146 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002147 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002148}
2149
2150/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2151static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2152{
2153 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002154 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002155 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2156 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002157 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002158}
2159
2160/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2161static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2162{
2163 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2164 vcpu->arch.cputm_enabled = true;
2165 __start_cpu_timer_accounting(vcpu);
2166}
2167
2168/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2169static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2170{
2171 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2172 __stop_cpu_timer_accounting(vcpu);
2173 vcpu->arch.cputm_enabled = false;
2174}
2175
2176static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2177{
2178 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2179 __enable_cpu_timer_accounting(vcpu);
2180 preempt_enable();
2181}
2182
2183static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2184{
2185 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2186 __disable_cpu_timer_accounting(vcpu);
2187 preempt_enable();
2188}
2189
David Hildenbrand4287f242016-02-15 09:40:12 +01002190/* set the cpu timer - may only be called from the VCPU thread itself */
2191void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2192{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002193 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002194 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002195 if (vcpu->arch.cputm_enabled)
2196 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002197 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002198 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002199 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002200}
2201
David Hildenbranddb0758b2016-02-15 09:42:25 +01002202/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002203__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2204{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002205 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002206 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002207
2208 if (unlikely(!vcpu->arch.cputm_enabled))
2209 return vcpu->arch.sie_block->cputm;
2210
David Hildenbrand9c23a132016-02-17 21:53:33 +01002211 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2212 do {
2213 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2214 /*
2215 * If the writer would ever execute a read in the critical
2216 * section, e.g. in irq context, we have a deadlock.
2217 */
2218 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2219 value = vcpu->arch.sie_block->cputm;
2220 /* if cputm_start is 0, accounting is being started/stopped */
2221 if (likely(vcpu->arch.cputm_start))
2222 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2223 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2224 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002225 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002226}
2227
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002228void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2229{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002230
David Hildenbrand37d9df92015-03-11 16:47:33 +01002231 gmap_enable(vcpu->arch.enabled_gmap);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002232 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002233 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002234 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002235 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002236}
2237
2238void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2239{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002240 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002241 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002242 __stop_cpu_timer_accounting(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002243 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002244 vcpu->arch.enabled_gmap = gmap_get_enabled();
2245 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002246
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002247}
2248
2249static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2250{
 2251 /* this equals an initial CPU reset per the POP, but we don't switch to ESA */
2252 vcpu->arch.sie_block->gpsw.mask = 0UL;
2253 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002254 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002255 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002256 vcpu->arch.sie_block->ckc = 0UL;
2257 vcpu->arch.sie_block->todpr = 0;
2258 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2259 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2260 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002261 /* make sure the new fpc will be lazily loaded */
2262 save_fpu_regs();
2263 current->thread.fpu.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002264 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002265 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002266 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2267 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002268 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2269 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002270 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002271}
2272
Dominik Dingel31928aa2014-12-04 15:47:07 +01002273void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002274{
Jason J. Herne72f25022014-11-25 09:46:02 -05002275 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002276 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002277 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
Fan Zhangfdf03652015-05-13 10:58:41 +02002278 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002279 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002280 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002281 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002282 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002283 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002284 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2285 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002286 /* make vcpu_load load the right gmap on the first trigger */
2287 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002288}
2289
Tony Krowiak5102ee82014-06-27 14:46:01 -04002290static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2291{
Michael Mueller9d8d5782015-02-02 15:42:51 +01002292 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002293 return;
2294
Tony Krowiaka374e892014-09-03 10:13:53 +02002295 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2296
2297 if (vcpu->kvm->arch.crypto.aes_kw)
2298 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2299 if (vcpu->kvm->arch.crypto.dea_kw)
2300 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2301
Tony Krowiak5102ee82014-06-27 14:46:01 -04002302 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2303}
2304
Dominik Dingelb31605c2014-03-25 13:47:11 +01002305void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2306{
2307 free_page(vcpu->arch.sie_block->cbrlo);
2308 vcpu->arch.sie_block->cbrlo = 0;
2309}
2310
2311int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2312{
2313 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2314 if (!vcpu->arch.sie_block->cbrlo)
2315 return -ENOMEM;
2316
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002317 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002318 return 0;
2319}
2320
Michael Mueller91520f12015-02-27 14:32:11 +01002321static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2322{
2323 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2324
Michael Mueller91520f12015-02-27 14:32:11 +01002325 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002326 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002327 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002328}
2329
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002330int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2331{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002332 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002333
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2335 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002336 CPUSTAT_STOPPED);
2337
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002338 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002339 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002340 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002341 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002342
Michael Mueller91520f12015-02-27 14:32:11 +01002343 kvm_s390_vcpu_setup_model(vcpu);
2344
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002345 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2346 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002347 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002348 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002349 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002350 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002351 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002352
David Hildenbrand873b4252016-04-04 15:53:47 +02002353 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002354 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002355 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002356 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2357 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002358 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002359 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002360 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002361 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002362 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002363 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002364 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002365 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002366 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002367 vcpu->arch.sie_block->eca |= ECA_VX;
2368 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002369 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002370 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2371 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002372 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002373
2374 if (sclp.has_kss)
2375 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2376 else
2377 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002378
Dominik Dingele6db1d62015-05-07 15:41:57 +02002379 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002380 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2381 if (rc)
2382 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002383 }
David Hildenbrand0ac96ca2014-12-12 15:17:31 +01002384 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002385 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002386
Tony Krowiak5102ee82014-06-27 14:46:01 -04002387 kvm_s390_vcpu_crypto_setup(vcpu);
2388
Dominik Dingelb31605c2014-03-25 13:47:11 +01002389 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390}
2391
2392struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2393 unsigned int id)
2394{
Carsten Otte4d475552011-10-18 12:27:12 +02002395 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002396 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002397 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002398
David Hildenbrand42158252015-10-12 12:57:22 +02002399 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002400 goto out;
2401
2402 rc = -ENOMEM;
2403
Michael Muellerb110fea2013-06-12 13:54:54 +02002404 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002405 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002406 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002407
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002408 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2409 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002410 goto out_free_cpu;
2411
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002412 vcpu->arch.sie_block = &sie_page->sie_block;
2413 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2414
David Hildenbrandefed1102015-04-16 12:32:41 +02002415 /* the real guest size will always be smaller than msl */
2416 vcpu->arch.sie_block->mso = 0;
2417 vcpu->arch.sie_block->msl = sclp.hamax;
2418
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002419 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002420 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002421 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002422 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002423 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002424 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002425
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002426 rc = kvm_vcpu_init(vcpu, kvm, id);
2427 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002428 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002429 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002430 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002431 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002432
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002433 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002434out_free_sie_block:
2435 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002436out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002437 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002438out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002439 return ERR_PTR(rc);
2440}
2441
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002442int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2443{
David Hildenbrand9a022062014-08-05 17:40:47 +02002444 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002445}
2446
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002447void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002448{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002449 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002450 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002451}
2452
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002453void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002454{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002455 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002456}
2457
Christian Borntraeger8e236542015-04-09 13:49:04 +02002458static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2459{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002460 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002461 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002462}
2463
2464static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2465{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002466 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002467}
2468
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002469/*
 2470 * Kick a guest CPU out of SIE (by setting CPUSTAT_STOP_INT) and wait
 2471 * until SIE is no longer running on that CPU. If the CPU is not running
 2472 * in SIE (e.g. it is waiting as idle), the function returns immediately. */
2473void exit_sie(struct kvm_vcpu *vcpu)
2474{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002475 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002476 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2477 cpu_relax();
2478}
2479
Christian Borntraeger8e236542015-04-09 13:49:04 +02002480/* Kick a guest cpu out of SIE to process a request synchronously */
2481void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002482{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002483 kvm_make_request(req, vcpu);
2484 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002485}
2486
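/*
 * Called by the gmap code whenever a range of guest mappings is
 * invalidated. Only the two pages backing a VCPU prefix area (always
 * below 2 GB) are of interest; invalidating those forces the VCPU to
 * reload its MMU state.
 */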
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
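
/*
 * Illustrative sketch (not part of the original file): the test above is
 * a standard interval-overlap check between [start, end] and the two-page
 * prefix area.  With a hypothetical prefix of 0x10000 and 4 KiB pages,
 * the notifier fires for any range intersecting [0x10000, 0x11fff]:
 *
 *	prefix <= end  &&  start <= prefix + 2*PAGE_SIZE - 1
 *	0x10000 <= end &&  start <= 0x11fff
 *
 * The numbers here are made-up examples, not values from a running guest.
 */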

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
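
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * reading the guest CPU timer through KVM_GET_ONE_REG.  The vcpu_fd
 * variable and the error handling are hypothetical.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */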

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
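
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * turning on single-stepping for a vcpu.  Only the flags collected in
 * VALID_GUESTDBG_FLAGS are accepted; anything else yields -EINVAL.
 * The vcpu_fd variable is hypothetical.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		perror("KVM_SET_GUEST_DEBUG");
 */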

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
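
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * stopping a vcpu via the mp_state interface.  Note that the first
 * KVM_SET_MP_STATE call also flips the VM into user-controlled cpu state
 * handling.  The vcpu_fd variable is hypothetical.
 *
 *	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &state) < 0)
 *		perror("KVM_SET_MP_STATE");
 */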

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMMA virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMMA virtualization if CMMA is available and
		 * was used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.use_cmma))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}
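
/*
 * Illustrative sketch (not from the original file): the epoch is the
 * signed difference between the requested guest TOD and the host TOD at
 * the time of the call; SIE adds it to the host clock on every guest
 * read.  With made-up numbers: if the host TOD is 0x1000 and userspace
 * asks for a guest TOD of 0x1800, the stored epoch is 0x800, and a later
 * host TOD of 0x1100 is seen by the guest as 0x1900.
 */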

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390, notifications for arriving pages will be delivered
	 * directly to the guest, but the housekeeping for completed pfaults
	 * is handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there should
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->valid &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}

	kvm_run->kvm_dirty_regs = 0;
}
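
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * before KVM_RUN, userspace marks which blocks of shared state it changed
 * via kvm_run->kvm_dirty_regs; sync_regs() above then copies exactly
 * those into the SIE block.  run is the mmap'ed struct kvm_run and
 * vcpu_fd is hypothetical.
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *
 *	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *		perror("KVM_RUN");
 */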

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
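
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * the usual dispatch loop around KVM_RUN.  -EINTR surfaces as an errno,
 * while intercepts such as KVM_EXIT_S390_SIEIC arrive through the exit
 * reason in the mmap'ed struct kvm_run.  run, vcpu_fd and handle_sieic()
 * are hypothetical.
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */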

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
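
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * enabling channel-subsystem support on a vcpu.  cap->flags must be zero
 * and only KVM_CAP_S390_CSS_SUPPORT is accepted here.  The vcpu_fd
 * variable is hypothetical.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */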

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
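
/*
 * Illustrative userspace sketch (an assumption, not code from this file):
 * reading 256 bytes from guest logical address 0x2000 through
 * KVM_S390_MEM_OP.  The transfer size is bounded by MEM_OP_MAX_SIZE
 * (64k).  buf and vcpu_fd are hypothetical.
 *
 *	unsigned char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x2000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 */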

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1 MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
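
/*
 * Illustrative sketch (not from the original file): 0xffffful masks the
 * low 20 bits, so both the userspace address and the size must be
 * multiples of 1 MB.  For example, a hypothetical slot with
 * userspace_addr = 0x100000 and memory_size = 0x200000 passes the checks
 * above, while memory_size = 0x180000 fails with -EINVAL.
 */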

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
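
/*
 * Illustrative sketch (not from the original file): nonhyp_fai isolates
 * the i-th 2-bit field of sclp.hmfai, and the shift by (nonhyp_fai << 4)
 * drops 0, 16, 32 or 48 facility bits from the mask.  E.g. for a
 * hypothetical field value of 1, the result is
 * 0x0000ffffffffffffUL >> 16 = 0x00000000ffffffffUL.
 */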

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");