blob: 8c538a1a23c1d8f1ce29f09a92734cf2123945c9 [file] [log] [blame]
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020025#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010026#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010027#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010028#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010029#include <asm/lowcore.h>
30#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010031#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010032#include <asm/switch_to.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
/* Expand to the offset/type pair expected by struct kvm_stats_debugfs_item. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported via debugfs.  Each entry maps a
 * user-visible file name to the offset of a counter inside
 * struct kvm_vcpu.stat.  The array must stay NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
101
/*
 * upper facilities limit for kvm
 *
 * Each word masks 64 facility bits.  Only facility bits set here are
 * ever reported to the guest; kvm_arch_init_vm() ANDs the host's
 * stfle facility list against this mask.
 */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100107
/*
 * Number of 64-bit words in the kvm facility mask.  The build-time
 * check guarantees the mask never exceeds the architected maximum
 * (S390_ARCH_FAC_MASK_SIZE_U64 words).
 */
unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
113
Michael Mueller9d8d5782015-02-02 15:42:51 +0100114static struct gmap_notifier gmap_notifier;
115
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100116/* Section: not file related */
/*
 * Per-CPU virtualization enable hook.  On s390 every CPU can run SIE,
 * so there is nothing to switch on; simply report success.
 */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
122
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200123static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
124
/*
 * One-time architecture setup: hook kvm_gmap_notifier() into the gmap
 * ipte notifier chain so guest TLB invalidations reach KVM.
 */
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}
131
/* Undo kvm_arch_hardware_setup(): drop the gmap ipte notifier again. */
void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}
136
/*
 * Module-init time arch hook.  Returns 0 on success or a negative
 * errno from the device-ops registration.
 */
int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
142
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100143/* Section: device related */
144long kvm_arch_dev_ioctl(struct file *filp,
145 unsigned int ioctl, unsigned long arg)
146{
147 if (ioctl == KVM_S390_ENABLE_SIE)
148 return s390_enable_sie();
149 return -EINVAL;
150}
151
/*
 * KVM_CHECK_EXTENSION handler: report which capabilities this VM
 * supports.  Returns 1 (or a limit value) for supported extensions,
 * 0 for anything unknown.
 */
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	/* Capabilities that are unconditionally available. */
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	/* Numeric limits are reported as the capability value. */
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	/* COW support depends on the ESOP hardware facility. */
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
193
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400194static void kvm_s390_sync_dirty_log(struct kvm *kvm,
195 struct kvm_memory_slot *memslot)
196{
197 gfn_t cur_gfn, last_gfn;
198 unsigned long address;
199 struct gmap *gmap = kvm->arch.gmap;
200
201 down_read(&gmap->mm->mmap_sem);
202 /* Loop over all guest pages */
203 last_gfn = memslot->base_gfn + memslot->npages;
204 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
205 address = gfn_to_hva_memslot(memslot, cur_gfn);
206
207 if (gmap_test_and_clear_dirty(address, gmap))
208 mark_page_dirty(kvm, cur_gfn);
209 }
210 up_read(&gmap->mm->mmap_sem);
211}
212
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100213/* Section: vm related */
214/*
215 * Get (and clear) the dirty memory log for a memory slot.
216 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Syncs the gmap dirty state into the bitmap first, copies the bitmap
 * to userspace via kvm_get_dirty_log(), then clears it.  All of this
 * happens under slots_lock so the memslot cannot change underneath us.
 * Returns 0 on success, -EINVAL for a bad slot id, -ENOENT if dirty
 * logging is not enabled for the slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
251
Cornelia Huckd938dc52013-10-23 18:26:34 +0200252static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
253{
254 int r;
255
256 if (cap->flags)
257 return -EINVAL;
258
259 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200260 case KVM_CAP_S390_IRQCHIP:
261 kvm->arch.use_irqchip = 1;
262 r = 0;
263 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200264 case KVM_CAP_S390_USER_SIGP:
265 kvm->arch.user_sigp = 1;
266 r = 0;
267 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200268 default:
269 r = -EINVAL;
270 break;
271 }
272 return r;
273}
274
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100275static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
276{
277 int ret;
278
279 switch (attr->attr) {
280 case KVM_S390_VM_MEM_LIMIT_SIZE:
281 ret = 0;
282 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
283 ret = -EFAULT;
284 break;
285 default:
286 ret = -ENXIO;
287 break;
288 }
289 return ret;
290}
291
/*
 * Write-side of the KVM_S390_VM_MEM_CTRL attribute group:
 * enable CMMA, reset CMMA state, or replace the gmap with one of a
 * smaller limit.  CMMA enable and limit changes are only allowed
 * before any vcpu has been created (-EBUSY otherwise).
 */
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		/* only permitted while no vcpus exist yet */
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		/* srcu protects against concurrent memslot access */
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		/* ucontrol guests manage their own gmaps */
		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		/* the limit may only shrink, never grow */
		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				/* swap in the new, smaller gmap */
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
350
Tony Krowiaka374e892014-09-03 10:13:53 +0200351static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
352
/*
 * Enable/disable AES or DEA protected-key wrapping for the VM.
 * Enabling generates a fresh random wrapping key mask; disabling
 * zeroes it.  Requires facility 76 (MSA extension 3).  Every vcpu is
 * reconfigured and kicked out of SIE afterwards so the new crycb
 * state takes effect; kvm->lock is held across the whole update.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	/* push the new crypto setup to all vcpus and force a SIE exit */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
397
Jason J. Herne72f25022014-11-25 09:46:02 -0500398static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
399{
400 u8 gtod_high;
401
402 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
403 sizeof(gtod_high)))
404 return -EFAULT;
405
406 if (gtod_high != 0)
407 return -EINVAL;
408
409 return 0;
410}
411
/*
 * Set the low (main) word of the guest TOD clock.  The guest time is
 * stored as an epoch offset relative to the host TOD; every vcpu's
 * SIE block is updated under kvm->lock and kicked out of SIE so the
 * new epoch takes effect immediately.
 */
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	/* guest TOD = host TOD + epoch, so store the difference */
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
435
436static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
437{
438 int ret;
439
440 if (attr->flags)
441 return -EINVAL;
442
443 switch (attr->attr) {
444 case KVM_S390_VM_TOD_HIGH:
445 ret = kvm_s390_set_tod_high(kvm, attr);
446 break;
447 case KVM_S390_VM_TOD_LOW:
448 ret = kvm_s390_set_tod_low(kvm, attr);
449 break;
450 default:
451 ret = -ENXIO;
452 break;
453 }
454 return ret;
455}
456
457static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
458{
459 u8 gtod_high = 0;
460
461 if (copy_to_user((void __user *)attr->addr, &gtod_high,
462 sizeof(gtod_high)))
463 return -EFAULT;
464
465 return 0;
466}
467
468static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
469{
470 u64 host_tod, gtod;
471 int r;
472
473 r = store_tod_clock(&host_tod);
474 if (r)
475 return r;
476
477 gtod = host_tod + kvm->arch.epoch;
478 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
479 return -EFAULT;
480
481 return 0;
482}
483
484static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
485{
486 int ret;
487
488 if (attr->flags)
489 return -EINVAL;
490
491 switch (attr->attr) {
492 case KVM_S390_VM_TOD_HIGH:
493 ret = kvm_s390_get_tod_high(kvm, attr);
494 break;
495 case KVM_S390_VM_TOD_LOW:
496 ret = kvm_s390_get_tod_low(kvm, attr);
497 break;
498 default:
499 ret = -ENXIO;
500 break;
501 }
502 return ret;
503}
504
Dominik Dingelf2061652014-04-09 13:13:00 +0200505static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
506{
507 int ret;
508
509 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200510 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100511 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200512 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500513 case KVM_S390_VM_TOD:
514 ret = kvm_s390_set_tod(kvm, attr);
515 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200516 case KVM_S390_VM_CRYPTO:
517 ret = kvm_s390_vm_set_crypto(kvm, attr);
518 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200519 default:
520 ret = -ENXIO;
521 break;
522 }
523
524 return ret;
525}
526
527static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
528{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100529 int ret;
530
531 switch (attr->group) {
532 case KVM_S390_VM_MEM_CTRL:
533 ret = kvm_s390_get_mem_control(kvm, attr);
534 break;
Jason J. Herne72f25022014-11-25 09:46:02 -0500535 case KVM_S390_VM_TOD:
536 ret = kvm_s390_get_tod(kvm, attr);
537 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100538 default:
539 ret = -ENXIO;
540 break;
541 }
542
543 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +0200544}
545
/*
 * KVM_HAS_DEVICE_ATTR handler: report whether a given group/attribute
 * pair is implemented (0) or not (-ENXIO), without performing it.
 * Must be kept in sync with the set/get handlers above.
 */
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
594
/*
 * Arch-specific handler for VM-level ioctls.  Copies the argument
 * structures in from userspace and dispatches to the matching helper.
 * Unknown ioctls return -ENOTTY so the generic layer can try them.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		/* only valid once the in-kernel irqchip was enabled */
		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
660
/*
 * Issue PQAP(QCI) to query the AP (crypto adapter) configuration into
 * the 128-byte buffer at @config.  Returns the instruction's condition
 * code (0 on success).  The asm fixes the function code in GR0 and the
 * buffer address in GR2 as required by the instruction; do not reorder.
 */
static int kvm_s390_query_ap_config(u8 *config)
{
	/* function code 0x04 = QCI, in the high byte of GR0 */
	u32 fcn_code = 0x04000000UL;
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
679
680static int kvm_s390_apxa_installed(void)
681{
682 u8 config[128];
683 int cc;
684
685 if (test_facility(2) && test_facility(12)) {
686 cc = kvm_s390_query_ap_config(config);
687
688 if (cc)
689 pr_err("PQAP(QCI) failed with cc=%d", cc);
690 else
691 return config[0] & 0x40;
692 }
693
694 return 0;
695}
696
697static void kvm_s390_set_crycb_format(struct kvm *kvm)
698{
699 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
700
701 if (kvm_s390_apxa_installed())
702 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
703 else
704 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
705}
706
/*
 * Fill @cpu_id from the host CPU id, overriding the version field
 * with 0xff for the guest-visible CPU model.
 */
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
712
/*
 * Allocate and initialize the VM's crypto control block (crycb).
 * A no-op (returning 0) when facility 76 (MSA extension 3) is not
 * available to the guest.  The crycb must live below 2 GB, hence
 * GFP_DMA.  Returns -ENOMEM on allocation failure.
 */
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}
731
/*
 * Arch-specific VM creation: validate @type, enable SIE for the host
 * process, and set up the SCA, debug feature, facility lists, CPU id,
 * crypto block, interrupt state and (for non-ucontrol guests) the
 * gmap.  Errors unwind the already-allocated resources via the goto
 * chain at the bottom; each label frees everything allocated before it.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the UCONTROL flag is a valid type bit, and it needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	/*
	 * Stagger the SCA within its page by a rotating 16-byte offset —
	 * presumably to spread SCAs of different VMs across cache lines;
	 * NOTE(review): confirm intent.
	 */
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the active copy (arch.model.fac->sie) and the current
	 * facilities set (arch.model.fac->kvm). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_U64);

	/*
	 * Apply the kvm facility mask to limit the kvm supported/tolerated
	 * facility list.
	 */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->kvm[i] = 0UL;
	}

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol guests get their gmaps from userspace, per vcpu */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
836
/*
 * Tear down a single vcpu: cancel pending local interrupts and async
 * page faults, detach the vcpu from the SCA (non-ucontrol guests),
 * free the per-vcpu gmap (ucontrol guests), release CMMA state and
 * the SIE block, then free the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this vcpu from the SCA's cpu mask and sda entry */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
862
/*
 * Destroy all vcpus of a VM, then clear the vcpu array and the online
 * counter under kvm->lock so no stale pointers remain visible.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	/* all vcpus are destroyed; drop the now-dangling pointers */
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
878
/*
 * Free all per-VM architecture state: vcpus first, then the facility list
 * page, SCA, debug feature, crypto control block, guest address space
 * (unless ucontrol, where userspace manages it), adapters and floating irqs.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
891
892/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +0100893static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
894{
895 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
896 if (!vcpu->arch.gmap)
897 return -ENOMEM;
898 vcpu->arch.gmap->private = vcpu->kvm;
899
900 return 0;
901}
902
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100903int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
904{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200905 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
906 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100907 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
908 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100909 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +0200910 KVM_SYNC_CRS |
911 KVM_SYNC_ARCH0 |
912 KVM_SYNC_PFAULT;
Dominik Dingeldafd0322014-12-02 16:53:21 +0100913
914 if (kvm_is_ucontrol(vcpu->kvm))
915 return __kvm_ucontrol_vcpu_init(vcpu);
916
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100917 return 0;
918}
919
/*
 * Scheduled in on a host cpu: stash the host FP control/registers and
 * access registers, install the guest's, enable the guest address space
 * and mark the vcpu as running. Save/restore order is significant.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	/* access regs live in the kvm_run sync area */
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
931
/*
 * Scheduled out: mirror image of kvm_arch_vcpu_load() — clear the running
 * flag, disable the guest address space, save the guest FP/access state
 * and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
943
/*
 * Bring a vcpu into initial-cpu-reset state as defined by the
 * architecture (Principles of Operation), except that the cpu stays
 * in z/Architecture mode instead of switching to ESA.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for cr0/cr14 (presumably per PoP) */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the zeroed fp control into the hardware register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	/* only stop the cpu ourselves if userspace does not manage cpu state */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
966
Dominik Dingel31928aa2014-12-04 15:47:07 +0100967void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -0200968{
Jason J. Herne72f25022014-11-25 09:46:02 -0500969 mutex_lock(&vcpu->kvm->lock);
970 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
971 mutex_unlock(&vcpu->kvm->lock);
Dominik Dingeldafd0322014-12-02 16:53:21 +0100972 if (!kvm_is_ucontrol(vcpu->kvm))
973 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -0200974}
975
Tony Krowiak5102ee82014-06-27 14:46:01 -0400976static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
977{
Michael Mueller9d8d5782015-02-02 15:42:51 +0100978 if (!test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -0400979 return;
980
Tony Krowiaka374e892014-09-03 10:13:53 +0200981 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
982
983 if (vcpu->kvm->arch.crypto.aes_kw)
984 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
985 if (vcpu->kvm->arch.crypto.dea_kw)
986 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
987
Tony Krowiak5102ee82014-06-27 14:46:01 -0400988 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
989}
990
/* Release the CMMA collaborative-memory bitmap page (cbrlo) of a vcpu. */
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
996
/*
 * Allocate the CMMA origin list page (cbrlo) and enable CMMA
 * interpretation in the SIE block.
 * Returns 0 on success, -ENOMEM if the page cannot be allocated.
 */
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* set/clear ecb2 control bits; presumably 0x80 = CMMA enable,
	 * 0x08 cleared — TODO confirm against the SIE block layout */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
1007
/*
 * Configure the vcpu's SIE control block: initial cpuflags, execution
 * control bits (ecb/ecb2/eca) gated on available facilities and sclp
 * features, interception controls, optional CMMA, the clock comparator
 * wakeup timer and the facility list handed to the SIE.
 * Returns 0 on success or the error from CMMA setup.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* facility 50 + 73: enable additional ecb bit 0x10 — see ecb layout */
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	/* intercept the storage-key and TPROT instructions */
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	/* timer used to wake the vcpu when its clock comparator fires */
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	/* hand the KVM facility list to the SIE for this vcpu */
	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
1045
1046struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1047 unsigned int id)
1048{
Carsten Otte4d475552011-10-18 12:27:12 +02001049 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001050 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02001051 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001052
Carsten Otte4d475552011-10-18 12:27:12 +02001053 if (id >= KVM_MAX_VCPUS)
1054 goto out;
1055
1056 rc = -ENOMEM;
1057
Michael Muellerb110fea2013-06-12 13:54:54 +02001058 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001059 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02001060 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001061
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001062 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1063 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001064 goto out_free_cpu;
1065
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001066 vcpu->arch.sie_block = &sie_page->sie_block;
1067 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1068
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001069 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +01001070 if (!kvm_is_ucontrol(kvm)) {
1071 if (!kvm->arch.sca) {
1072 WARN_ON_ONCE(1);
1073 goto out_free_cpu;
1074 }
1075 if (!kvm->arch.sca->cpu[id].sda)
1076 kvm->arch.sca->cpu[id].sda =
1077 (__u64) vcpu->arch.sie_block;
1078 vcpu->arch.sie_block->scaoh =
1079 (__u32)(((__u64)kvm->arch.sca) >> 32);
1080 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1081 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1082 }
Michael Mueller9d8d5782015-02-02 15:42:51 +01001083 vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001084
Carsten Otteba5c1e92008-03-25 18:47:26 +01001085 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01001086 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02001087 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01001088 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +01001089
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001090 rc = kvm_vcpu_init(vcpu, kvm, id);
1091 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001092 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001093 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1094 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02001095 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001096
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001097 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08001098out_free_sie_block:
1099 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001100out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02001101 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02001102out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001103 return ERR_PTR(rc);
1104}
1105
/* A vcpu is runnable iff it has a deliverable interrupt pending. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
1110
/* Prevent the vcpu from (re-)entering SIE by setting the block bit. */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1115
/* Allow the vcpu to enter SIE again — undo s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
1120
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait until the SIE no longer reports "in SIE" for this vcpu */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
1131
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* block first so the vcpu cannot slip back into SIE after the kick */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
1138
/*
 * gmap invalidation callback: if the unmapped address covers a vcpu's
 * prefix area (two pages), request an MMU reload for that vcpu and
 * force it out of SIE so the request is honoured.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
1154
/*
 * s390 uses its own SIE exit mechanism (exit_sie) instead of the
 * common-code kick; this hook must never be reached.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
1161
/*
 * KVM_GET_ONE_REG: copy a single s390 vcpu register to user space.
 * Returns 0 on success, -EFAULT from put_user on a bad user address,
 * -EINVAL for an unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
1210
/*
 * KVM_SET_ONE_REG: set a single s390 vcpu register from user space.
 * Mirror image of the get variant; additionally, invalidating the
 * pfault token drains the async-pf completion queue.
 * Returns 0 on success, -EFAULT on a bad user address, -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		/* an invalid token disables pfault: drop queued completions */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -05001261
/* ioctl wrapper around the initial-cpu-reset; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
1267
/* KVM_SET_REGS: copy general purpose registers into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
1273
/* KVM_GET_REGS: copy general purpose registers out of the sync area. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
1279
/*
 * KVM_SET_SREGS: install access and control registers; the access
 * registers are also loaded into the hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
1288
/* KVM_GET_SREGS: read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
1296
/*
 * KVM_SET_FPU: validate and install the guest FP control and registers,
 * loading them into the hardware right away.
 * Returns -EINVAL if the fpc value is not a valid FP control.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* reject before touching any state */
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
1307
/* KVM_GET_FPU: read back the guest FP registers and FP control. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
1314
1315static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1316{
1317 int rc = 0;
1318
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02001319 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001320 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001321 else {
1322 vcpu->run->psw_mask = psw.mask;
1323 vcpu->run->psw_addr = psw.addr;
1324 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001325 return rc;
1326}
1327
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
1333
/* debug flag combinations userspace may request via KVM_SET_GUEST_DEBUG */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * KVM_SET_GUEST_DEBUG: enable/disable guest debugging. When enabled,
 * guest PER (program event recording) is forced via CPUSTAT_P and any
 * hardware breakpoints are imported. On failure all debug state is
 * rolled back. Returns 0, -EINVAL for unknown flags, or the error from
 * importing breakpoint data.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	/* start from a clean debug state */
	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	/* undo everything if breakpoint import failed */
	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
1369
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001370int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1371 struct kvm_mp_state *mp_state)
1372{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001373 /* CHECK_STOP and LOAD are not supported yet */
1374 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1375 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001376}
1377
1378int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1379 struct kvm_mp_state *mp_state)
1380{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001381 int rc = 0;
1382
1383 /* user space knows about this interface - let it control the state */
1384 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1385
1386 switch (mp_state->mp_state) {
1387 case KVM_MP_STATE_STOPPED:
1388 kvm_s390_vcpu_stop(vcpu);
1389 break;
1390 case KVM_MP_STATE_OPERATING:
1391 kvm_s390_vcpu_start(vcpu);
1392 break;
1393 case KVM_MP_STATE_LOAD:
1394 case KVM_MP_STATE_CHECK_STOP:
1395 /* fall through - CHECK_STOP and LOAD are not supported yet */
1396 default:
1397 rc = -ENXIO;
1398 }
1399
1400 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001401}
1402
Dominik Dingelb31605c2014-03-25 13:47:11 +01001403bool kvm_s390_cmma_enabled(struct kvm *kvm)
1404{
1405 if (!MACHINE_IS_LPAR)
1406 return false;
1407 /* only enable for z10 and later */
1408 if (!MACHINE_HAS_EDAT1)
1409 return false;
1410 if (!kvm->arch.use_cmma)
1411 return false;
1412 return true;
1413}
1414
/* Is the IBS (interlock and broadcast suppression) cpuflag currently set? */
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
1419
/*
 * Process pending vcpu requests before (re-)entering SIE: re-arm the
 * prefix ipte notifier, flush the TLB, and toggle IBS. Each handled
 * request restarts the loop so newly raised requests are seen.
 * Returns 0, or the error from gmap_ipte_notify.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* invalidate the SIE's cached host cpu to force a TLB flush */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1469
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	/* delegate to the gmap layer; request write access only if needed */
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
1485
/*
 * Inject a pfault notification for @token into the guest: an INIT
 * interrupt on the vcpu when the fault starts, or a DONE interrupt on
 * the VM when it completes.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1502
/* async-pf hook: a page is not present — inject the pfault-init token. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1509
/* async-pf hook: the page became present — inject the pfault-done token. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1516
/* async-pf hook: nothing to do here on s390 (see comment below). */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1522
/* async-pf hook: always true so the completion cleanup path still runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1531
/*
 * Try to set up an async page fault for the current gmap fault address.
 * Bails out (returns 0) unless all pfault preconditions hold: a valid
 * token, matching PSW mask vs. pfault_select/compare, external
 * interrupts enabled, no irq pending, cr0 service-signal subclass set
 * and pfault enabled on the gmap. Otherwise returns the result of
 * kvm_setup_async_pf.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* translate the faulting guest address to a host virtual address */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* fetch the 8-byte token the guest registered for this pfault */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1560
/*
 * Work done before each SIE entry: async-pf housekeeping, syncing gprs
 * 14/15 into the SIE block, voluntary reschedule, machine-check
 * handling, interrupt delivery, pending vcpu requests and guest-debug
 * PER patching. Returns 0 when SIE may be entered, or an error.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* copy gprs 14 and 15 (16 bytes) into the SIE block's gg14/gg15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol VMs: interrupt delivery is left to userspace */
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1602
/*
 * Post-process one SIE exit: translate the raw exit_reason into a return
 * code, resolve guest page faults (asynchronously if possible) and hand
 * recognized intercepts to the in-kernel handlers.
 *
 * Returns 0 to continue the run loop, -EREMOTE/-EOPNOTSUPP to bounce to
 * userspace, or a negative error.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;	/* sentinel: exit not handled yet, see fault path below */

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	/* undo the PER patching done in vcpu_pre_run() */
	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol: report the translation fault to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			/* async pf could not be armed: fault in the page now */
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	/* still -1: unrecognized SIE failure, reflect as addressing exception */
	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* mirror gg14/gg15 back into the user-visible gpr array */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1652
/*
 * Core run loop: alternate host-side preparation (vcpu_pre_run) with guest
 * execution via sie64a() and exit handling (vcpu_post_run) until a signal,
 * a pending debug exit or a non-zero rc stops the loop.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/* drop srcu while the guest runs; SIE may block indefinitely */
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1687
/*
 * Load the register state that userspace marked dirty (kvm_dirty_regs
 * bits) from kvm_run into the vcpu before entering the guest.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	/* PSW is always synced, independent of the dirty bits */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		/* an invalid token disables the pfault handshake entirely */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/* all dirty state has been consumed */
	kvm_run->kvm_dirty_regs = 0;
}
1715
/*
 * Mirror the vcpu's current register state back into kvm_run so userspace
 * sees it after KVM_RUN returns (inverse of sync_regs()).
 */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
1731
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001732int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1733{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001734 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001735 sigset_t sigsaved;
1736
David Hildenbrand27291e22014-01-23 12:26:52 +01001737 if (guestdbg_exit_pending(vcpu)) {
1738 kvm_s390_prepare_debug_exit(vcpu);
1739 return 0;
1740 }
1741
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001742 if (vcpu->sigset_active)
1743 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1744
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001745 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1746 kvm_s390_vcpu_start(vcpu);
1747 } else if (is_vcpu_stopped(vcpu)) {
1748 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1749 vcpu->vcpu_id);
1750 return -EINVAL;
1751 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001752
David Hildenbrandb028ee32014-07-17 10:47:43 +02001753 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001754
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001755 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001756 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001757
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001758 if (signal_pending(current) && !rc) {
1759 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001760 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001761 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001762
David Hildenbrand27291e22014-01-23 12:26:52 +01001763 if (guestdbg_exit_pending(vcpu) && !rc) {
1764 kvm_s390_prepare_debug_exit(vcpu);
1765 rc = 0;
1766 }
1767
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001768 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001769 /* intercept cannot be handled in-kernel, prepare kvm-run */
1770 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1771 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001772 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1773 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1774 rc = 0;
1775 }
1776
1777 if (rc == -EREMOTE) {
1778 /* intercept was handled, but userspace support is needed
1779 * kvm_run has been prepared by the handler */
1780 rc = 0;
1781 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001782
David Hildenbrandb028ee32014-07-17 10:47:43 +02001783 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001784
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001785 if (vcpu->sigset_active)
1786 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1787
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001788 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001789 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001790}
1791
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;	/* flags z/Architecture mode in byte 163 */
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* rc accumulates all write results; any failure yields -EFAULT */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* clock comparator is stored without its low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1839
/*
 * Store-status wrapper for a loaded vcpu: refresh the software copies of
 * the lazily-switched register sets, then write the save area.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1853
/* Request IBS deactivation on one vcpu and kick it out of SIE so the
 * request takes effect; clears a still-pending ENABLE request first. */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1860
1861static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1862{
1863 unsigned int i;
1864 struct kvm_vcpu *vcpu;
1865
1866 kvm_for_each_vcpu(i, vcpu, kvm) {
1867 __disable_ibs_on_vcpu(vcpu);
1868 }
1869}
1870
/* Request IBS activation on one vcpu and kick it out of SIE so the
 * request takes effect; clears a still-pending DISABLE request first. */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1877
/*
 * Move a vcpu from the STOPPED to the running state, managing the IBS
 * (interlock-and-broadcast-suppression) facility: IBS may only be active
 * while exactly one vcpu is running.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* count vcpus that are already running */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
1916
/*
 * Move a vcpu into the STOPPED state and re-enable IBS on the last
 * remaining runner if this stop leaves exactly one vcpu running.
 */
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	/* find the vcpus still running; remember the last one seen */
	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
1954
/*
 * Handle KVM_ENABLE_CAP on a vcpu. Currently only KVM_CAP_S390_CSS_SUPPORT
 * is recognized; it enables channel-subsystem intercepts VM-wide.
 * Returns 0 on success, -EINVAL for unknown caps or non-zero flags.
 */
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	/* no flags are defined for any capability yet */
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		/* enabling is idempotent; only trace the first transition */
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
1977
/*
 * Dispatcher for the s390-specific per-vcpu ioctls. Copies arguments from
 * userspace, performs the operation and returns 0, a positive value, or a
 * negative errno (-ENOTTY for unknown ioctls).
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		/* translate the legacy interrupt format before injecting */
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* hold srcu: store-status writes guest memory (memslots) */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* UCAS mappings only make sense for ucontrol guests */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
2082
/*
 * mmap fault handler for the vcpu file: for ucontrol guests the SIE
 * control block page may be mapped into userspace; everything else
 * gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		/* take a reference; the fault core releases it on unmap */
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
2095
/* No arch-private memslot metadata is needed on s390. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
2101
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002102/* Section: memory related */
/*
 * Validate a memslot before it is committed: both the userspace address
 * and the size must be 1MB (segment) aligned; see the comment below.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
2121
2122void kvm_arch_commit_memory_region(struct kvm *kvm,
2123 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09002124 const struct kvm_memory_slot *old,
2125 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002126{
Carsten Ottef7850c92011-07-24 10:48:23 +02002127 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02002128
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01002129 /* If the basics of the memslot do not change, we do not want
2130 * to update the gmap. Every update causes several unnecessary
2131 * segment translation exceptions. This is usually handled just
2132 * fine by the normal fault handler + gmap, but it will also
2133 * cause faults on the prefix page of running guest CPUs.
2134 */
2135 if (old->userspace_addr == mem->userspace_addr &&
2136 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2137 old->npages * PAGE_SIZE == mem->memory_size)
2138 return;
Carsten Otte598841c2011-07-24 10:48:21 +02002139
2140 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2141 mem->guest_phys_addr, mem->memory_size);
2142 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02002143 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02002144 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002145}
2146
/* Module init: register with the generic KVM core. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
2151
/* Module exit: unregister from the generic KVM core. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
2156
2157module_init(kvm_s390_init);
2158module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02002159
2160/*
2161 * Enable autoloading of the kvm module.
2162 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2163 * since x86 takes a different approach.
2164 */
2165#include <linux/miscdevice.h>
2166MODULE_ALIAS_MISCDEV(KVM_MINOR);
2167MODULE_ALIAS("devname:kvm");