blob: a8fe3ab76d68f0c1076541e4728ecf4605e4d2cf [file] [log] [blame]
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010028#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010029#include <asm/lowcore.h>
30#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010031#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010032#include <asm/switch_to.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
/*
 * Per-vcpu statistics exported via debugfs.  VCPU_STAT() expands to the
 * offset of the named counter inside struct kvm_vcpu plus the stat kind,
 * as expected by struct kvm_stats_debugfs_item.
 */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Table is NULL-terminated; consumed by the generic kvm debugfs code. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
101
/*
 * Upper facilities limit for kvm: facility bits not set in this mask are
 * never exposed to a guest, regardless of what the host supports.
 */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100107
/*
 * Number of u64 words in kvm_s390_fac_list_mask; compile-time checked
 * against the architectural maximum S390_ARCH_FAC_MASK_SIZE_U64.
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
113
Michael Mueller9d8d5782015-02-02 15:42:51 +0100114static struct gmap_notifier gmap_notifier;
115
/* Section: not file related */
/* Nothing to switch on: SIE needs no per-cpu hardware enablement. */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
122
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * One-time arch setup: register the gmap ipte-invalidation notifier so
 * kvm_gmap_notifier() is called on guest mapping invalidations.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
131
/* Undo kvm_arch_hardware_setup(): drop the gmap ipte notifier again. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
136
/* Arch-specific module init; @opaque is unused on s390. */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
142
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100143/* Section: device related */
144long kvm_arch_dev_ioctl(struct file *filp,
145 unsigned int ioctl, unsigned long arg)
146{
147 if (ioctl == KVM_S390_ENABLE_SIE)
148 return s390_enable_sie();
149 return -EINVAL;
150}
151
/*
 * Report which KVM capabilities this s390 implementation supports.
 * Returns 1/0 for boolean capabilities and a limit value for the
 * numeric ones (vcpu and memslot counts).
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		/* only advertised when the machine provides ESOP */
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* only advertised when the host has vector support */
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}
196
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400197static void kvm_s390_sync_dirty_log(struct kvm *kvm,
198 struct kvm_memory_slot *memslot)
199{
200 gfn_t cur_gfn, last_gfn;
201 unsigned long address;
202 struct gmap *gmap = kvm->arch.gmap;
203
204 down_read(&gmap->mm->mmap_sem);
205 /* Loop over all guest pages */
206 last_gfn = memslot->base_gfn + memslot->npages;
207 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
208 address = gfn_to_hva_memslot(memslot, cur_gfn);
209
210 if (gmap_test_and_clear_dirty(address, gmap))
211 mark_page_dirty(kvm, cur_gfn);
212 }
213 up_read(&gmap->mm->mmap_sem);
214}
215
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Returns 0 on success, -EINVAL for a bad slot index, -ENOENT if the
 * slot has no dirty bitmap, or the error from kvm_get_dirty_log().
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* pull dirty state out of the gmap before copying the bitmap out */
	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
254
/*
 * Enable an optional VM-wide capability requested via KVM_ENABLE_CAP.
 * cap->flags must be zero.  Returns 0 on success, -EINVAL for unknown
 * capabilities or when a required host feature is missing.
 */
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		/* vector registers require host vector facility support */
		kvm->arch.use_vectors = MACHINE_HAS_VX;
		r = MACHINE_HAS_VX ? 0 : -EINVAL;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
281
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100282static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
283{
284 int ret;
285
286 switch (attr->attr) {
287 case KVM_S390_VM_MEM_LIMIT_SIZE:
288 ret = 0;
289 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
290 ret = -EFAULT;
291 break;
292 default:
293 ret = -ENXIO;
294 break;
295 }
296 return ret;
297}
298
/*
 * Set a memory-control attribute: enable CMMA, reset CMMA state, or
 * replace the gmap with one of a new (smaller or equal) limit.  CMMA
 * enable and the limit change are only allowed before the first vcpu
 * is created (checked under kvm->lock).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* -EBUSY unless no vcpus exist yet */
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol VMs manage their address space themselves */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit can never grow beyond the current asce_end */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
357
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

/*
 * Enable or disable AES/DEA protected-key wrapping for the VM, refreshing
 * the wrapping-key masks in the crycb.  Requires facility bit 76 for the
 * guest.  Every vcpu is re-set-up and kicked out of SIE so the new crycb
 * state takes effect.  Returns 0, -EINVAL (facility missing) or -ENXIO.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		/* enabling generates a fresh random wrapping key mask */
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		/* disabling zeroizes the wrapping key mask */
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
404
Jason J. Herne72f25022014-11-25 09:46:02 -0500405static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
406{
407 u8 gtod_high;
408
409 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
410 sizeof(gtod_high)))
411 return -EFAULT;
412
413 if (gtod_high != 0)
414 return -EINVAL;
415
416 return 0;
417}
418
/*
 * Set the guest TOD clock: store the delta between the requested guest
 * TOD and the current host TOD as the VM epoch and propagate it to every
 * vcpu's SIE block, kicking each vcpu out of SIE so it is picked up.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
442
443static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
444{
445 int ret;
446
447 if (attr->flags)
448 return -EINVAL;
449
450 switch (attr->attr) {
451 case KVM_S390_VM_TOD_HIGH:
452 ret = kvm_s390_set_tod_high(kvm, attr);
453 break;
454 case KVM_S390_VM_TOD_LOW:
455 ret = kvm_s390_set_tod_low(kvm, attr);
456 break;
457 default:
458 ret = -ENXIO;
459 break;
460 }
461 return ret;
462}
463
464static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
465{
466 u8 gtod_high = 0;
467
468 if (copy_to_user((void __user *)attr->addr, &gtod_high,
469 sizeof(gtod_high)))
470 return -EFAULT;
471
472 return 0;
473}
474
/*
 * Read the guest TOD clock (host TOD plus the VM epoch) and copy it to
 * userspace.  Returns 0, a store_tod_clock() error, or -EFAULT.
 */
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}
490
491static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
492{
493 int ret;
494
495 if (attr->flags)
496 return -EINVAL;
497
498 switch (attr->attr) {
499 case KVM_S390_VM_TOD_HIGH:
500 ret = kvm_s390_get_tod_high(kvm, attr);
501 break;
502 case KVM_S390_VM_TOD_LOW:
503 ret = kvm_s390_get_tod_low(kvm, attr);
504 break;
505 default:
506 ret = -ENXIO;
507 break;
508 }
509 return ret;
510}
511
/*
 * Install a userspace-supplied CPU model (cpuid, ibc, facility list)
 * for the guest.  Only permitted while no vcpu exists yet (-EBUSY
 * otherwise); -ENOMEM/-EFAULT on allocation/copy failure.
 */
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
541
542static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
543{
544 int ret = -ENXIO;
545
546 switch (attr->attr) {
547 case KVM_S390_VM_CPU_PROCESSOR:
548 ret = kvm_s390_set_processor(kvm, attr);
549 break;
550 }
551 return ret;
552}
553
/*
 * Copy the VM's current CPU model (cpuid, ibc, guest facility list) to
 * userspace.  Returns 0, -ENOMEM, or -EFAULT.
 */
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
573
/*
 * Report the host machine's CPU model to userspace: real cpuid, the
 * sclp ibc value, kvm's facility mask and the raw host facility list
 * (stfle).  Returns 0, -ENOMEM, or -EFAULT.
 */
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
596
597static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
598{
599 int ret = -ENXIO;
600
601 switch (attr->attr) {
602 case KVM_S390_VM_CPU_PROCESSOR:
603 ret = kvm_s390_get_processor(kvm, attr);
604 break;
605 case KVM_S390_VM_CPU_MACHINE:
606 ret = kvm_s390_get_machine(kvm, attr);
607 break;
608 }
609 return ret;
610}
611
/* Dispatch KVM_SET_DEVICE_ATTR on the VM fd to the per-group setter. */
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
636
/* Dispatch KVM_GET_DEVICE_ATTR on the VM fd to the per-group getter.
 * Note: KVM_S390_VM_CRYPTO has no getter — crypto state is write-only. */
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
658
/*
 * KVM_HAS_DEVICE_ATTR on the VM fd: report whether a group/attribute
 * pair is supported (0) or not (-ENXIO), without touching any state.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
718
/*
 * ioctls on the VM fd: floating interrupt injection, capability
 * enabling, dummy irqchip routing setup, and the device-attribute
 * (set/get/has) interface.  Unknown ioctls return -ENOTTY.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid once userspace enabled the in-kernel irqchip */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
784
/*
 * Execute PQAP(QCI) to query the AP configuration into the 128-byte
 * buffer at @config.  Returns the instruction's condition code; if the
 * instruction faults, the exception table entry skips the ipm and 0 is
 * returned with @config left zeroed.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* function code 0x04 selects the QCI subfunction */
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
806
/*
 * Check whether the APXA facility is installed by querying the AP
 * configuration.  Requires facilities 2 and 12 for the QCI query;
 * returns non-zero if bit 0x40 of the first config byte is set, 0 when
 * APXA is absent or the query cannot be performed.
 */
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
823
824static void kvm_s390_set_crycb_format(struct kvm *kvm)
825{
826 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
827
828 if (kvm_s390_apxa_installed())
829 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
830 else
831 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
832}
833
/* Read the host cpu id and override the version byte with 0xff. */
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
839
/*
 * Allocate and initialize the crypto control block (crycb) for a new VM.
 * Silently succeeds when facility 76 is not available to the guest.
 * AES and DEA protected-key wrapping are enabled by default with fresh
 * random wrapping-key masks.  Returns 0 or -ENOMEM.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	/* GFP_DMA: the crycb address must fit the 31-bit crycbd field */
	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
862
/*
 * Create the architecture specific parts of a s390 VM: the system
 * control area (SCA) shared with the SIE, the s390 debug feature area,
 * the cpu model facility mask/list and the crypto control block.
 * Returns 0 on success; on failure everything initialized so far is
 * torn down through the goto chain and a negative errno is returned.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol type bit is valid, and it needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	/*
	 * Stagger consecutive SCAs within their page in 16-byte steps
	 * (offset cycles through 0x000..0x7f0).  NOTE(review): presumably
	 * spreads cache line usage between VMs - confirm.
	 */
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	/* restrict the host facilities to the KVM-supported subset */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol: user space manages the guest address space */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.use_vectors = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
969
/*
 * Tear down a single vcpu: clear pending irqs and async page faults,
 * detach it from the SCA, release its gmap (ucontrol only), its CMMA
 * bitmap and the SIE control block, then free the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this cpu from the SCA cpu mask and sda entry */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before the memory is released */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
995
996static void kvm_free_vcpus(struct kvm *kvm)
997{
998 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300999 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01001000
Gleb Natapov988a2ca2009-06-09 15:56:29 +03001001 kvm_for_each_vcpu(i, vcpu, kvm)
1002 kvm_arch_vcpu_destroy(vcpu);
1003
1004 mutex_lock(&kvm->lock);
1005 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1006 kvm->vcpus[i] = NULL;
1007
1008 atomic_set(&kvm->online_vcpus, 0);
1009 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01001010}
1011
/*
 * Release all architecture specific VM state.  The vcpus must go first
 * since they reference the SCA and the debug area freed afterwards.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
1024
1025/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01001026static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1027{
1028 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1029 if (!vcpu->arch.gmap)
1030 return -ENOMEM;
1031 vcpu->arch.gmap->private = vcpu->kvm;
1032
1033 return 0;
1034}
1035
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001036int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1037{
Dominik Dingel3c038e62013-10-07 17:11:48 +02001038 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1039 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01001040 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1041 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01001042 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02001043 KVM_SYNC_CRS |
1044 KVM_SYNC_ARCH0 |
1045 KVM_SYNC_PFAULT;
Eric Farman68c55752014-06-09 10:57:26 -04001046 if (test_kvm_facility(vcpu->kvm, 129))
1047 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01001048
1049 if (kvm_is_ucontrol(vcpu->kvm))
1050 return __kvm_ucontrol_vcpu_init(vcpu);
1051
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001052 return 0;
1053}
1054
/*
 * Called when the vcpu is scheduled in on a host cpu: save the host
 * FP/vector and access registers, load the guest ones, enable the
 * guest address space and mark the vcpu as running.  The save/restore
 * order must not change - host state first, then guest state.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (vcpu->kvm->arch.use_vectors) {
		/* with vectors the guest FP state lives in the sync regs */
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
1074
/*
 * Called when the vcpu is scheduled out: mirror image of
 * kvm_arch_vcpu_load() - save the guest FP/vector and access
 * registers, restore the host ones and disable the guest address
 * space.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (vcpu->kvm->arch.use_vectors) {
		/* with vectors the guest FP state lives in the sync regs */
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
1094
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the cleared FP control into the hardware register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu if user space does not control the run state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
1117
/*
 * Finish vcpu creation after it became visible to user space: copy the
 * VM-wide TOD epoch under the VM lock and, for non-ucontrol VMs, let
 * the vcpu share the VM's guest address space.
 */
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
1126
Tony Krowiak5102ee82014-06-27 14:46:01 -04001127static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1128{
Michael Mueller9d8d5782015-02-02 15:42:51 +01001129 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04001130 return;
1131
Tony Krowiaka374e892014-09-03 10:13:53 +02001132 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1133
1134 if (vcpu->kvm->arch.crypto.aes_kw)
1135 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1136 if (vcpu->kvm->arch.crypto.dea_kw)
1137 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1138
Tony Krowiak5102ee82014-06-27 14:46:01 -04001139 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1140}
1141
/* Release the vcpu's CMMA bitmap page and clear its SIE pointer. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
1147
/*
 * Allocate the CMMA bitmap page for a vcpu and enable collaborative
 * memory management in its execution controls.
 * Returns 0 on success, -ENOMEM if the page cannot be allocated.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/*
	 * NOTE(review): 0x80 presumably enables CMMA interpretation and
	 * clearing 0x08 disables a conflicting control - confirm against
	 * the SIE control block documentation.
	 */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1158
Michael Mueller91520f12015-02-27 14:32:11 +01001159static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1160{
1161 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1162
1163 vcpu->arch.cpu_id = model->cpu_id;
1164 vcpu->arch.sie_block->ibc = model->ibc;
1165 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1166}
1167
/*
 * One-time SIE control block setup for a new vcpu: initial cpu flags,
 * cpu model data, execution/interception controls, CMMA state, the
 * clock comparator timer and the crypto controls.
 * Returns 0 on success or a negative errno from CMMA setup.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	kvm_s390_vcpu_setup_model(vcpu);

	/*
	 * NOTE(review): the numeric ecb/ecb2/eca values below encode SIE
	 * execution control bits; confirm each against the SIE control
	 * block layout before changing them.
	 */
	vcpu->arch.sie_block->ecb = 6;
	/* transactional execution needs facilities 50 and 73 */
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* intercept storage key instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1202
1203struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1204 unsigned int id)
1205{
Carsten Otte4d475552011-10-18 12:27:12 +02001206 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001207 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001208 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001209
Carsten Otte4d475552011-10-18 12:27:12 +02001210 if (id >= KVM_MAX_VCPUS)
1211 goto out;
1212
1213 rc = -ENOMEM;
1214
Michael Muellerb110fea2013-06-12 13:54:54 +02001215 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001216 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001217 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001218
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001219 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1220 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001221 goto out_free_cpu;
1222
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001223 vcpu->arch.sie_block = &sie_page->sie_block;
1224 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
Eric Farman68c55752014-06-09 10:57:26 -04001225 vcpu->arch.host_vregs = &sie_page->vregs;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001226
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001227 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001228 if (!kvm_is_ucontrol(kvm)) {
1229 if (!kvm->arch.sca) {
1230 WARN_ON_ONCE(1);
1231 goto out_free_cpu;
1232 }
1233 if (!kvm->arch.sca->cpu[id].sda)
1234 kvm->arch.sca->cpu[id].sda =
1235 (__u64) vcpu->arch.sie_block;
1236 vcpu->arch.sie_block->scaoh =
1237 (__u32)(((__u64)kvm->arch.sca) >> 32);
1238 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1239 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1240 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001241
Carsten Otteba5c1e92008-03-25 18:47:26 +01001242 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001243 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001244 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001245 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001246
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001247 rc = kvm_vcpu_init(vcpu, kvm, id);
1248 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001249 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001250 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1251 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001252 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001253
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001254 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001255out_free_sie_block:
1256 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001257out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001258 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001259out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001260 return ERR_PTR(rc);
1261}
1262
/* A vcpu is runnable when it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1267
/* Prevent the vcpu from (re)entering SIE until unblocked. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1272
/* Allow the vcpu to enter SIE again; counterpart of s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1277
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.  Note: this busy-waits on the in-SIE flag.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1288
/*
 * Kick a guest cpu out of SIE and prevent SIE-reentry until the
 * caller unblocks the vcpu again via s390_vcpu_unblock().
 */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1295
/*
 * gmap invalidation callback: if the invalidated address covers a
 * vcpu's prefix area, force that vcpu out of SIE and request an MMU
 * reload so the prefix mapping is re-pinned before the next entry.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
1311
/*
 * s390 uses exit_sie()/SIE intercepts instead of IPI-based kicks, so
 * this common-code hook must never be reached.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1318
Carsten Otte14eebd92012-05-15 14:15:26 +02001319static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1320 struct kvm_one_reg *reg)
1321{
1322 int r = -EINVAL;
1323
1324 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001325 case KVM_REG_S390_TODPR:
1326 r = put_user(vcpu->arch.sie_block->todpr,
1327 (u32 __user *)reg->addr);
1328 break;
1329 case KVM_REG_S390_EPOCHDIFF:
1330 r = put_user(vcpu->arch.sie_block->epoch,
1331 (u64 __user *)reg->addr);
1332 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001333 case KVM_REG_S390_CPU_TIMER:
1334 r = put_user(vcpu->arch.sie_block->cputm,
1335 (u64 __user *)reg->addr);
1336 break;
1337 case KVM_REG_S390_CLOCK_COMP:
1338 r = put_user(vcpu->arch.sie_block->ckc,
1339 (u64 __user *)reg->addr);
1340 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001341 case KVM_REG_S390_PFTOKEN:
1342 r = put_user(vcpu->arch.pfault_token,
1343 (u64 __user *)reg->addr);
1344 break;
1345 case KVM_REG_S390_PFCOMPARE:
1346 r = put_user(vcpu->arch.pfault_compare,
1347 (u64 __user *)reg->addr);
1348 break;
1349 case KVM_REG_S390_PFSELECT:
1350 r = put_user(vcpu->arch.pfault_select,
1351 (u64 __user *)reg->addr);
1352 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001353 case KVM_REG_S390_PP:
1354 r = put_user(vcpu->arch.sie_block->pp,
1355 (u64 __user *)reg->addr);
1356 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001357 case KVM_REG_S390_GBEA:
1358 r = put_user(vcpu->arch.sie_block->gbea,
1359 (u64 __user *)reg->addr);
1360 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001361 default:
1362 break;
1363 }
1364
1365 return r;
1366}
1367
1368static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1369 struct kvm_one_reg *reg)
1370{
1371 int r = -EINVAL;
1372
1373 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02001374 case KVM_REG_S390_TODPR:
1375 r = get_user(vcpu->arch.sie_block->todpr,
1376 (u32 __user *)reg->addr);
1377 break;
1378 case KVM_REG_S390_EPOCHDIFF:
1379 r = get_user(vcpu->arch.sie_block->epoch,
1380 (u64 __user *)reg->addr);
1381 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02001382 case KVM_REG_S390_CPU_TIMER:
1383 r = get_user(vcpu->arch.sie_block->cputm,
1384 (u64 __user *)reg->addr);
1385 break;
1386 case KVM_REG_S390_CLOCK_COMP:
1387 r = get_user(vcpu->arch.sie_block->ckc,
1388 (u64 __user *)reg->addr);
1389 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02001390 case KVM_REG_S390_PFTOKEN:
1391 r = get_user(vcpu->arch.pfault_token,
1392 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02001393 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1394 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02001395 break;
1396 case KVM_REG_S390_PFCOMPARE:
1397 r = get_user(vcpu->arch.pfault_compare,
1398 (u64 __user *)reg->addr);
1399 break;
1400 case KVM_REG_S390_PFSELECT:
1401 r = get_user(vcpu->arch.pfault_select,
1402 (u64 __user *)reg->addr);
1403 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01001404 case KVM_REG_S390_PP:
1405 r = get_user(vcpu->arch.sie_block->pp,
1406 (u64 __user *)reg->addr);
1407 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01001408 case KVM_REG_S390_GBEA:
1409 r = get_user(vcpu->arch.sie_block->gbea,
1410 (u64 __user *)reg->addr);
1411 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02001412 default:
1413 break;
1414 }
1415
1416 return r;
1417}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001418
/* KVM_S390_INITIAL_RESET ioctl: perform an initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1424
/* Copy all general purpose registers from user space into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1430
/* Copy all general purpose registers of the vcpu out to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1436
/*
 * Set access and control registers; the access registers are loaded
 * into the hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1445
/* Copy access and control registers of the vcpu out to user space. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1453
/*
 * Set the guest floating point registers.  The FP control value is
 * validated first; the new state is loaded into the hardware
 * registers immediately.  Returns -EINVAL for an invalid fpc.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1464
/* Copy the guest floating point registers out to user space. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1471
1472static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1473{
1474 int rc = 0;
1475
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001476 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001477 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001478 else {
1479 vcpu->run->psw_mask = psw.mask;
1480 vcpu->run->psw_addr = psw.addr;
1481 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001482 return rc;
1483}
1484
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1490
David Hildenbrand27291e22014-01-23 12:26:52 +01001491#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1492 KVM_GUESTDBG_USE_HW_BP | \
1493 KVM_GUESTDBG_ENABLE)
1494
/*
 * Configure guest debugging: enable/disable guest PER and install
 * hardware breakpoints.  On any failure all debug state is rolled
 * back.  Returns 0 on success, -EINVAL for unknown control flags, or
 * the error from importing breakpoint data.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean state before applying the new settings */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* roll back on failure so no partial debug state remains */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1526
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001527int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1528 struct kvm_mp_state *mp_state)
1529{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001530 /* CHECK_STOP and LOAD are not supported yet */
1531 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1532 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001533}
1534
1535int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1536 struct kvm_mp_state *mp_state)
1537{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001538 int rc = 0;
1539
1540 /* user space knows about this interface - let it control the state */
1541 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1542
1543 switch (mp_state->mp_state) {
1544 case KVM_MP_STATE_STOPPED:
1545 kvm_s390_vcpu_stop(vcpu);
1546 break;
1547 case KVM_MP_STATE_OPERATING:
1548 kvm_s390_vcpu_start(vcpu);
1549 break;
1550 case KVM_MP_STATE_LOAD:
1551 case KVM_MP_STATE_CHECK_STOP:
1552 /* fall through - CHECK_STOP and LOAD are not supported yet */
1553 default:
1554 rc = -ENXIO;
1555 }
1556
1557 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001558}
1559
Dominik Dingelb31605c2014-03-25 13:47:11 +01001560bool kvm_s390_cmma_enabled(struct kvm *kvm)
1561{
1562 if (!MACHINE_IS_LPAR)
1563 return false;
1564 /* only enable for z10 and later */
1565 if (!MACHINE_HAS_EDAT1)
1566 return false;
1567 if (!kvm->arch.use_cmma)
1568 return false;
1569 return true;
1570}
1571
David Hildenbrand8ad35752014-03-14 11:00:21 +01001572static bool ibs_enabled(struct kvm_vcpu *vcpu)
1573{
1574 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1575}
1576
/*
 * Process all pending vcpu requests before (re)entering SIE.
 *
 * Each handled request restarts the loop via "goto retry" so that requests
 * raised while handling an earlier one are not missed. Returns 0 on success
 * or a negative error code (currently only from gmap_ipte_notify).
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        /* drop the "block SIE entry" condition set by request senders */
        s390_vcpu_unblock(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Lets just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                /* re-arm the notifier on both pages of the prefix area */
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /* invalidate the cached host cpu; forces a guest TLB flush */
                vcpu->arch.sie_block->ihcpu = 0xffff;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_set_mask(CPUSTAT_IBS,
                                        &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_clear_mask(CPUSTAT_IBS,
                                          &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}
1626
Thomas Huthfa576c52014-05-06 17:20:16 +02001627/**
1628 * kvm_arch_fault_in_page - fault-in guest page if necessary
1629 * @vcpu: The corresponding virtual cpu
1630 * @gpa: Guest physical address
1631 * @writable: Whether the page should be writable or not
1632 *
1633 * Make sure that a guest page has been faulted-in on the host.
1634 *
1635 * Return: Zero on success, negative error code otherwise.
1636 */
1637long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001638{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001639 return gmap_fault(vcpu->arch.gmap, gpa,
1640 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001641}
1642
/*
 * Inject a pfault notification carrying @token into the guest.
 *
 * @start_token: true  -> PFAULT_INIT, injected into this vcpu
 *               false -> PFAULT_DONE, injected as a floating (VM) interrupt
 *
 * Only the fields consumed by the respective injection path are set on the
 * stack-allocated interrupt structures; the remaining fields stay
 * uninitialized on purpose (matching the original flow).
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        struct kvm_s390_irq irq;

        if (start_token) {
                irq.u.ext.ext_params2 = token;
                irq.type = KVM_S390_INT_PFAULT_INIT;
                /* injection failure indicates a programming error */
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}
1659
1660void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1661 struct kvm_async_pf *work)
1662{
1663 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1664 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1665}
1666
1667void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1668 struct kvm_async_pf *work)
1669{
1670 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1671 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1672}
1673
/* Async-pf callback: no deferred injection needed on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}
1679
/* Always report "can inject" so the async-pf core performs its cleanup. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to cleanup
         */
        return true;
}
1688
/*
 * Try to turn the current host fault into an asynchronous pfault for the
 * guest. Returns nonzero if an async pf was set up (so the caller may
 * resume the guest), 0 if the fault must be resolved synchronously.
 *
 * All of the guard checks must pass; their order matters only in that
 * cheap checks come first.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        /* pfault handshake disabled by the guest */
        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        /* guest-selected PSW bits must match the agreed compare value */
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
        /* CR0 bit 0x200 gates delivery - presumably the external-interrupt
         * subclass for pfault; confirm against the PoP before relying on it */
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        /* host address of the faulting guest page, incl. page offset */
        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        /* fetch the guest's 8-byte token; on failure fall back to sync */
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}
1717
/*
 * Prepare the vcpu for (re)entering SIE: handle async-pf housekeeping,
 * machine checks, pending interrupts and vcpu requests, and guest debug
 * patching. Returns 0 when the guest may be entered, nonzero otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the house keeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        /* mirror gprs 14/15 into the SIE block (gg14/gg15) */
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        /* ucontrol guests handle interrupt delivery in userspace */
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}
1759
/*
 * Handle a fault that occurred while executing the SIE instruction itself:
 * inject an addressing exception into the guest and advance the PSW past
 * the faulting instruction.
 */
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        u8 opcode;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
        trace_kvm_s390_sie_fault(vcpu);

        /*
         * We want to inject an addressing exception, which is defined as a
         * suppressing or terminating exception. However, since we came here
         * by a DAT access exception, the PSW still points to the faulting
         * instruction since DAT exceptions are nullifying. So we've got
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
        rc = read_guest(vcpu, psw->addr, &opcode, 1);
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        /* negative offset: rewind "backwards by -len", i.e. forward */
        psw->addr = __rewind_psw(*psw, -insn_length(opcode));

        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
1784
/*
 * Post-process a SIE exit. @exit_reason is the return value of sie64a():
 * >= 0 means a regular intercept; < 0 means the host faulted during SIE.
 * Returns 0 to re-enter the guest, a negative error, or -EOPNOTSUPP /
 * -EREMOTE to hand control to userspace.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;            /* -1 == "not classified yet" sentinel */

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                /* let userspace resolve the fault for ucontrol guests */
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                /* guest page fault: try async pfault, else fault in sync */
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        /* none of the above matched: the fault hit the SIE instruction */
        if (rc == -1)
                rc = vcpu_post_run_fault_in_sie(vcpu);

        /* copy gg14/gg15 back to the run structure's gprs 14/15 */
        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}
1831
/*
 * Main vcpu run loop: repeatedly prepare, enter SIE, and post-process exits
 * until an error occurs, a signal is pending, or a guest-debug exit is due.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                /* drop srcu while executing guest code */
                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}
1866
/*
 * Copy the register state userspace marked dirty (kvm_dirty_regs) from the
 * kvm_run area into the vcpu / SIE control block before entering the guest.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        /* the PSW is always synced, independent of kvm_dirty_regs */
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                /* some control register changes require a tlb flush */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
                vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
                /* invalid token disables pfault; drop queued completions */
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
        }
        kvm_run->kvm_dirty_regs = 0;
}
1894
1895static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1896{
1897 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1898 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1899 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1900 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1901 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1902 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1903 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1904 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1905 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1906 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1907 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1908 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1909}
1910
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001911int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1912{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001913 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001914 sigset_t sigsaved;
1915
David Hildenbrand27291e22014-01-23 12:26:52 +01001916 if (guestdbg_exit_pending(vcpu)) {
1917 kvm_s390_prepare_debug_exit(vcpu);
1918 return 0;
1919 }
1920
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001921 if (vcpu->sigset_active)
1922 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1923
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001924 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1925 kvm_s390_vcpu_start(vcpu);
1926 } else if (is_vcpu_stopped(vcpu)) {
1927 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1928 vcpu->vcpu_id);
1929 return -EINVAL;
1930 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001931
David Hildenbrandb028ee32014-07-17 10:47:43 +02001932 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001933
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001934 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001935 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001936
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001937 if (signal_pending(current) && !rc) {
1938 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001939 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001940 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001941
David Hildenbrand27291e22014-01-23 12:26:52 +01001942 if (guestdbg_exit_pending(vcpu) && !rc) {
1943 kvm_s390_prepare_debug_exit(vcpu);
1944 rc = 0;
1945 }
1946
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001947 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001948 /* intercept cannot be handled in-kernel, prepare kvm-run */
1949 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1950 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001951 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1952 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1953 rc = 0;
1954 }
1955
1956 if (rc == -EREMOTE) {
1957 /* intercept was handled, but userspace support is needed
1958 * kvm_run has been prepared by the handler */
1959 rc = 0;
1960 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001961
David Hildenbrandb028ee32014-07-17 10:47:43 +02001962 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001963
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001964 if (vcpu->sigset_active)
1965 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1966
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001967 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001968 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001969}
1970
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (fp/gp regs, PSW, prefix, fpc, TOD
 * programmable reg, CPU timer, clock comparator, access and control regs)
 * to guest absolute memory. Returns 0 or -EFAULT if any write failed.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        unsigned int px;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                /* byte 163: architected mode indicator */
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        /* OR the results together; any failure maps to -EFAULT below */
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        px = kvm_s390_get_prefix(vcpu);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &px, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
        /* the architected save area stores the clock comparator >> 8 */
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}
2018
/*
 * Store status for a loaded (currently running thread's) vcpu: refresh the
 * lazily-kept FP/access register copies first, then write the save area.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Lets update our copies before we save
         * it into the save area
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}
2032
/*
 * Request IBS to be disabled on @vcpu. Cancels a stale ENABLE request
 * first, then kicks the vcpu out of SIE so the request takes effect.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        /* kvm_check_request clears the opposite request if still pending */
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}
2039
2040static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2041{
2042 unsigned int i;
2043 struct kvm_vcpu *vcpu;
2044
2045 kvm_for_each_vcpu(i, vcpu, kvm) {
2046 __disable_ibs_on_vcpu(vcpu);
2047 }
2048}
2049
/*
 * Request IBS to be enabled on @vcpu. Cancels a stale DISABLE request
 * first, then kicks the vcpu out of SIE so the request takes effect.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        /* kvm_check_request clears the opposite request if still pending */
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}
2056
/*
 * Transition @vcpu out of the STOPPED state, managing the IBS facility:
 * IBS stays on only while exactly one vcpu is running.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;

        if (!is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* count how many vcpus are already running */
        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
                        started_vcpus++;
        }

        if (started_vcpus == 0) {
                /* we're the only active VCPU -> speed it up */
                __enable_ibs_on_vcpu(vcpu);
        } else if (started_vcpus == 1) {
                /*
                 * As we are starting a second VCPU, we have to disable
                 * the IBS facility on all VCPUs to remove potentially
                 * oustanding ENABLE requests.
                 */
                __disable_ibs_on_all_vcpus(vcpu->kvm);
        }

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        /*
         * Another VCPU might have used IBS while we were offline.
         * Let's play safe and flush the VCPU at startup.
         */
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}
2095
/*
 * Transition @vcpu into the STOPPED state. If exactly one vcpu remains
 * running afterwards, enable IBS on it to speed it up.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;
        struct kvm_vcpu *started_vcpu = NULL;

        if (is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
        kvm_s390_clear_stop_irq(vcpu);

        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        __disable_ibs_on_vcpu(vcpu);

        /* find the remaining running vcpus (if any) */
        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
                        started_vcpus++;
                        started_vcpu = vcpu->kvm->vcpus[i];
                }
        }

        if (started_vcpus == 1) {
                /*
                 * As we only have one VCPU left, we want to enable the
                 * IBS facility for that VCPU to speed it up.
                 */
                __enable_ibs_on_vcpu(started_vcpu);
        }

        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}
2133
Cornelia Huckd6712df2012-12-20 15:32:11 +01002134static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2135 struct kvm_enable_cap *cap)
2136{
2137 int r;
2138
2139 if (cap->flags)
2140 return -EINVAL;
2141
2142 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002143 case KVM_CAP_S390_CSS_SUPPORT:
2144 if (!vcpu->kvm->arch.css_support) {
2145 vcpu->kvm->arch.css_support = 1;
2146 trace_kvm_s390_enable_css(vcpu->kvm);
2147 }
2148 r = 0;
2149 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01002150 default:
2151 r = -EINVAL;
2152 break;
2153 }
2154 return r;
2155}
2156
/*
 * Dispatch vcpu-level ioctls: interrupt injection, store status, initial
 * PSW / reset, one-reg access, ucontrol address-space (un)mapping, explicit
 * fault-in, and capability enablement.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
                struct kvm_s390_irq s390irq;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                /* convert the legacy interrupt format to the irq format */
                if (s390int_to_s390irq(&s390int, &s390irq))
                        return -EINVAL;
                r = kvm_s390_inject_vcpu(vcpu, &s390irq);
                break;
        }
        case KVM_S390_STORE_STATUS:
                /* hold srcu: storing status accesses guest memory slots */
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                /* only valid for user-controlled virtual machines */
                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                /* fault-in the guest page at address arg (read access) */
                r = gmap_fault(vcpu->arch.gmap, arg, 0);
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}
2261
Carsten Otte5b1c1492012-01-04 10:25:23 +01002262int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2263{
2264#ifdef CONFIG_KVM_S390_UCONTROL
2265 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2266 && (kvm_is_ucontrol(vcpu->kvm))) {
2267 vmf->page = virt_to_page(vcpu->arch.sie_block);
2268 get_page(vmf->page);
2269 return 0;
2270 }
2271#endif
2272 return VM_FAULT_SIGBUS;
2273}
2274
/* No per-slot arch data needed on s390; always succeeds. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}
2280
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002281/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002282int kvm_arch_prepare_memory_region(struct kvm *kvm,
2283 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09002284 struct kvm_userspace_memory_region *mem,
2285 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002286{
Nick Wangdd2887e2013-03-25 17:22:57 +01002287 /* A few sanity checks. We can have memory slots which have to be
2288 located/ended at a segment boundary (1MB). The memory in userland is
2289 ok to be fragmented into various different vmas. It is okay to mmap()
2290 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002291
Carsten Otte598841c2011-07-24 10:48:21 +02002292 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002293 return -EINVAL;
2294
Carsten Otte598841c2011-07-24 10:48:21 +02002295 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002296 return -EINVAL;
2297
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002298 return 0;
2299}
2300
2301void kvm_arch_commit_memory_region(struct kvm *kvm,
2302 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002303 const struct kvm_memory_slot *old,
2304 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002305{
Carsten Ottef7850c92011-07-24 10:48:23 +02002306 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002307
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002308 /* If the basics of the memslot do not change, we do not want
2309 * to update the gmap. Every update causes several unnecessary
2310 * segment translation exceptions. This is usually handled just
2311 * fine by the normal fault handler + gmap, but it will also
2312 * cause faults on the prefix page of running guest CPUs.
2313 */
2314 if (old->userspace_addr == mem->userspace_addr &&
2315 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2316 old->npages * PAGE_SIZE == mem->memory_size)
2317 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002318
2319 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2320 mem->guest_phys_addr, mem->memory_size);
2321 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02002322 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002323 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002324}
2325
/* Module entry point: register s390 arch support with the KVM core. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2330
/* Module exit point: unregister from the KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2335
/* Hook the init/exit functions into the module loader. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");