blob: 628e992eeded3c6fb04a534bf10ad4122dac8c6d [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b592013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
Heiko Carstensb0c632d2008-03-25 18:47:20 +010041#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020045 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010046 { "exit_validity", VCPU_STAT(exit_validity) },
47 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48 { "exit_external_request", VCPU_STAT(exit_external_request) },
49 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010050 { "exit_instruction", VCPU_STAT(exit_instruction) },
51 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020053 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010054 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010055 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010057 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020058 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010059 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020066 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010067 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68 { "instruction_spx", VCPU_STAT(instruction_spx) },
69 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70 { "instruction_stap", VCPU_STAT(instruction_stap) },
71 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010072 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010073 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020075 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010076 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020078 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010079 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010080 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020081 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010082 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010087 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010088 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020089 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010090 { NULL }
91};
92
Michael Mueller78c4b592013-07-26 15:04:04 +020093unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020094static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010095
Michael Mueller78c4b592013-07-26 15:04:04 +020096/* test availability of vfacility */
Heiko Carstens280ef0f2013-12-17 09:08:28 +010097int test_vfacility(unsigned long nr)
Michael Mueller78c4b592013-07-26 15:04:04 +020098{
99 return __test_facility(nr, (void *) vfacilities);
100}
101
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/*
	 * Nothing to switch on: every s390 CPU that runs Linux already
	 * provides the SIE virtualization facility.
	 */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100111int kvm_arch_hardware_setup(void)
112{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200113 gmap_notifier.notifier_call = kvm_gmap_notifier;
114 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115 return 0;
116}
117
118void kvm_arch_hardware_unsetup(void)
119{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200120 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100121}
122
/* Arch hook at module load time; s390 needs no extra initialization here. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100128/* Section: device related */
129long kvm_arch_dev_ioctl(struct file *filp,
130 unsigned int ioctl, unsigned long arg)
131{
132 if (ioctl == KVM_S390_ENABLE_SIE)
133 return s390_enable_sie();
134 return -EINVAL;
135}
136
Alexander Graf784aa3d2014-07-14 18:27:35 +0200137int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100138{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100139 int r;
140
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200141 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100142 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200143 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100144 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100145#ifdef CONFIG_KVM_S390_UCONTROL
146 case KVM_CAP_S390_UCONTROL:
147#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200148 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100149 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200150 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100151 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100152 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huckebc32262014-05-09 15:00:46 +0200153 case KVM_CAP_IRQFD:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100154 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200155 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200156 case KVM_CAP_ENABLE_CAP_VM:
Cornelia Huck78599d92014-07-15 09:54:39 +0200157 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200158 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200159 case KVM_CAP_MP_STATE:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100160 r = 1;
161 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200162 case KVM_CAP_NR_VCPUS:
163 case KVM_CAP_MAX_VCPUS:
164 r = KVM_MAX_VCPUS;
165 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100166 case KVM_CAP_NR_MEMSLOTS:
167 r = KVM_USER_MEM_SLOTS;
168 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200169 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100170 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200171 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200172 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100173 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200174 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100175 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100176}
177
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400178static void kvm_s390_sync_dirty_log(struct kvm *kvm,
179 struct kvm_memory_slot *memslot)
180{
181 gfn_t cur_gfn, last_gfn;
182 unsigned long address;
183 struct gmap *gmap = kvm->arch.gmap;
184
185 down_read(&gmap->mm->mmap_sem);
186 /* Loop over all guest pages */
187 last_gfn = memslot->base_gfn + memslot->npages;
188 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
189 address = gfn_to_hva_memslot(memslot, cur_gfn);
190
191 if (gmap_test_and_clear_dirty(address, gmap))
192 mark_page_dirty(kvm, cur_gfn);
193 }
194 up_read(&gmap->mm->mmap_sem);
195}
196
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100197/* Section: vm related */
198/*
199 * Get (and clear) the dirty memory log for a memory slot.
200 */
201int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
202 struct kvm_dirty_log *log)
203{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400204 int r;
205 unsigned long n;
206 struct kvm_memory_slot *memslot;
207 int is_dirty = 0;
208
209 mutex_lock(&kvm->slots_lock);
210
211 r = -EINVAL;
212 if (log->slot >= KVM_USER_MEM_SLOTS)
213 goto out;
214
215 memslot = id_to_memslot(kvm->memslots, log->slot);
216 r = -ENOENT;
217 if (!memslot->dirty_bitmap)
218 goto out;
219
220 kvm_s390_sync_dirty_log(kvm, memslot);
221 r = kvm_get_dirty_log(kvm, log, &is_dirty);
222 if (r)
223 goto out;
224
225 /* Clear the dirty log */
226 if (is_dirty) {
227 n = kvm_dirty_bitmap_bytes(memslot);
228 memset(memslot->dirty_bitmap, 0, n);
229 }
230 r = 0;
231out:
232 mutex_unlock(&kvm->slots_lock);
233 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100234}
235
Cornelia Huckd938dc52013-10-23 18:26:34 +0200236static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
237{
238 int r;
239
240 if (cap->flags)
241 return -EINVAL;
242
243 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200244 case KVM_CAP_S390_IRQCHIP:
245 kvm->arch.use_irqchip = 1;
246 r = 0;
247 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200248 default:
249 r = -EINVAL;
250 break;
251 }
252 return r;
253}
254
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200255static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
256{
257 int ret;
258 unsigned int idx;
259 switch (attr->attr) {
260 case KVM_S390_VM_MEM_ENABLE_CMMA:
261 ret = -EBUSY;
262 mutex_lock(&kvm->lock);
263 if (atomic_read(&kvm->online_vcpus) == 0) {
264 kvm->arch.use_cmma = 1;
265 ret = 0;
266 }
267 mutex_unlock(&kvm->lock);
268 break;
269 case KVM_S390_VM_MEM_CLR_CMMA:
270 mutex_lock(&kvm->lock);
271 idx = srcu_read_lock(&kvm->srcu);
272 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
273 srcu_read_unlock(&kvm->srcu, idx);
274 mutex_unlock(&kvm->lock);
275 ret = 0;
276 break;
277 default:
278 ret = -ENXIO;
279 break;
280 }
281 return ret;
282}
283
Dominik Dingelf2061652014-04-09 13:13:00 +0200284static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
285{
286 int ret;
287
288 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200289 case KVM_S390_VM_MEM_CTRL:
290 ret = kvm_s390_mem_control(kvm, attr);
291 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200292 default:
293 ret = -ENXIO;
294 break;
295 }
296
297 return ret;
298}
299
300static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
301{
302 return -ENXIO;
303}
304
305static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
306{
307 int ret;
308
309 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200310 case KVM_S390_VM_MEM_CTRL:
311 switch (attr->attr) {
312 case KVM_S390_VM_MEM_ENABLE_CMMA:
313 case KVM_S390_VM_MEM_CLR_CMMA:
314 ret = 0;
315 break;
316 default:
317 ret = -ENXIO;
318 break;
319 }
320 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200321 default:
322 ret = -ENXIO;
323 break;
324 }
325
326 return ret;
327}
328
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100329long kvm_arch_vm_ioctl(struct file *filp,
330 unsigned int ioctl, unsigned long arg)
331{
332 struct kvm *kvm = filp->private_data;
333 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200334 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100335 int r;
336
337 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100338 case KVM_S390_INTERRUPT: {
339 struct kvm_s390_interrupt s390int;
340
341 r = -EFAULT;
342 if (copy_from_user(&s390int, argp, sizeof(s390int)))
343 break;
344 r = kvm_s390_inject_vm(kvm, &s390int);
345 break;
346 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200347 case KVM_ENABLE_CAP: {
348 struct kvm_enable_cap cap;
349 r = -EFAULT;
350 if (copy_from_user(&cap, argp, sizeof(cap)))
351 break;
352 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
353 break;
354 }
Cornelia Huck84223592013-07-15 13:36:01 +0200355 case KVM_CREATE_IRQCHIP: {
356 struct kvm_irq_routing_entry routing;
357
358 r = -EINVAL;
359 if (kvm->arch.use_irqchip) {
360 /* Set up dummy routing. */
361 memset(&routing, 0, sizeof(routing));
362 kvm_set_irq_routing(kvm, &routing, 0, 0);
363 r = 0;
364 }
365 break;
366 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200367 case KVM_SET_DEVICE_ATTR: {
368 r = -EFAULT;
369 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
370 break;
371 r = kvm_s390_vm_set_attr(kvm, &attr);
372 break;
373 }
374 case KVM_GET_DEVICE_ATTR: {
375 r = -EFAULT;
376 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
377 break;
378 r = kvm_s390_vm_get_attr(kvm, &attr);
379 break;
380 }
381 case KVM_HAS_DEVICE_ATTR: {
382 r = -EFAULT;
383 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
384 break;
385 r = kvm_s390_vm_has_attr(kvm, &attr);
386 break;
387 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100388 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300389 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100390 }
391
392 return r;
393}
394
Carsten Ottee08b9632012-01-04 10:25:20 +0100395int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100396{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100397 int rc;
398 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100399 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100400
Carsten Ottee08b9632012-01-04 10:25:20 +0100401 rc = -EINVAL;
402#ifdef CONFIG_KVM_S390_UCONTROL
403 if (type & ~KVM_VM_S390_UCONTROL)
404 goto out_err;
405 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
406 goto out_err;
407#else
408 if (type)
409 goto out_err;
410#endif
411
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100412 rc = s390_enable_sie();
413 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100414 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100415
Carsten Otteb2904112011-10-18 12:27:13 +0200416 rc = -ENOMEM;
417
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100418 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
419 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100420 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100421 spin_lock(&kvm_lock);
422 sca_offset = (sca_offset + 16) & 0x7f0;
423 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
424 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100425
426 sprintf(debug_name, "kvm-%u", current->pid);
427
428 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
429 if (!kvm->arch.dbf)
430 goto out_nodbf;
431
Carsten Otteba5c1e92008-03-25 18:47:26 +0100432 spin_lock_init(&kvm->arch.float_int.lock);
433 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100434 init_waitqueue_head(&kvm->arch.ipte_wq);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100435
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100436 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
437 VM_EVENT(kvm, 3, "%s", "vm created");
438
Carsten Ottee08b9632012-01-04 10:25:20 +0100439 if (type & KVM_VM_S390_UCONTROL) {
440 kvm->arch.gmap = NULL;
441 } else {
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200442 kvm->arch.gmap = gmap_alloc(current->mm, -1UL);
Carsten Ottee08b9632012-01-04 10:25:20 +0100443 if (!kvm->arch.gmap)
444 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200445 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200446 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100447 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100448
449 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200450 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100451
David Hildenbrand8ad35752014-03-14 11:00:21 +0100452 spin_lock_init(&kvm->arch.start_stop_lock);
453
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100454 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200455out_nogmap:
456 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100457out_nodbf:
458 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100459out_err:
460 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100461}
462
Christian Borntraegerd329c032008-11-26 14:50:27 +0100463void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
464{
465 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200466 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100467 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200468 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100469 if (!kvm_is_ucontrol(vcpu->kvm)) {
470 clear_bit(63 - vcpu->vcpu_id,
471 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
472 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
473 (__u64) vcpu->arch.sie_block)
474 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
475 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200476 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100477
478 if (kvm_is_ucontrol(vcpu->kvm))
479 gmap_free(vcpu->arch.gmap);
480
Dominik Dingelb31605c2014-03-25 13:47:11 +0100481 if (kvm_s390_cmma_enabled(vcpu->kvm))
482 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100483 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200484
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100485 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200486 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100487}
488
489static void kvm_free_vcpus(struct kvm *kvm)
490{
491 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300492 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100493
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300494 kvm_for_each_vcpu(i, vcpu, kvm)
495 kvm_arch_vcpu_destroy(vcpu);
496
497 mutex_lock(&kvm->lock);
498 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
499 kvm->vcpus[i] = NULL;
500
501 atomic_set(&kvm->online_vcpus, 0);
502 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100503}
504
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100505void kvm_arch_destroy_vm(struct kvm *kvm)
506{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100507 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100508 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100509 debug_unregister(kvm->arch.dbf);
Carsten Otte27e03932012-01-04 10:25:21 +0100510 if (!kvm_is_ucontrol(kvm))
511 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200512 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +0100513 kvm_s390_clear_float_irqs(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100514}
515
516/* Section: vcpu related */
517int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
518{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200519 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
520 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100521 if (kvm_is_ucontrol(vcpu->kvm)) {
Martin Schwidefskyc6c956b2014-07-01 14:36:04 +0200522 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
Carsten Otte27e03932012-01-04 10:25:21 +0100523 if (!vcpu->arch.gmap)
524 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200525 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100526 return 0;
527 }
528
Carsten Otte598841c2011-07-24 10:48:21 +0200529 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100530 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
531 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100532 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +0200533 KVM_SYNC_CRS |
534 KVM_SYNC_ARCH0 |
535 KVM_SYNC_PFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100536 return 0;
537}
538
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100539void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
540{
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200541 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
542 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100543 save_access_regs(vcpu->arch.host_acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200544 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
545 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100546 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200547 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100548 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100549}
550
551void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
552{
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100553 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200554 gmap_disable(vcpu->arch.gmap);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200555 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
556 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100557 save_access_regs(vcpu->run->s.regs.acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200558 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
559 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100560 restore_access_regs(vcpu->arch.host_acrs);
561}
562
563static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
564{
565 /* this equals initial cpu reset in pop, but we don't switch to ESA */
566 vcpu->arch.sie_block->gpsw.mask = 0UL;
567 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +0100568 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100569 vcpu->arch.sie_block->cputm = 0UL;
570 vcpu->arch.sie_block->ckc = 0UL;
571 vcpu->arch.sie_block->todpr = 0;
572 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
573 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
574 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
575 vcpu->arch.guest_fpregs.fpc = 0;
576 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
577 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100578 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +0200579 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
580 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200581 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
582 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +0100583 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100584}
585
/* Hook after vcpu creation; nothing additional is needed on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
590
Dominik Dingelb31605c2014-03-25 13:47:11 +0100591void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
592{
593 free_page(vcpu->arch.sie_block->cbrlo);
594 vcpu->arch.sie_block->cbrlo = 0;
595}
596
597int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
598{
599 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
600 if (!vcpu->arch.sie_block->cbrlo)
601 return -ENOMEM;
602
603 vcpu->arch.sie_block->ecb2 |= 0x80;
604 vcpu->arch.sie_block->ecb2 &= ~0x08;
605 return 0;
606}
607
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100608int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
609{
Dominik Dingelb31605c2014-03-25 13:47:11 +0100610 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200611
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100612 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
613 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200614 CPUSTAT_STOPPED |
615 CPUSTAT_GED);
Christian Borntraegerfc345312010-06-17 23:16:20 +0200616 vcpu->arch.sie_block->ecb = 6;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200617 if (test_vfacility(50) && test_vfacility(73))
618 vcpu->arch.sie_block->ecb |= 0x10;
619
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200620 vcpu->arch.sie_block->ecb2 = 8;
David Hildenbrand49539192014-02-21 08:59:59 +0100621 vcpu->arch.sie_block->eca = 0xD1002000U;
Heiko Carstens217a4402013-12-30 12:54:14 +0100622 if (sclp_has_siif())
623 vcpu->arch.sie_block->eca |= 1;
Michael Mueller78c4b592013-07-26 15:04:04 +0200624 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
Matthew Rosato5a5e6532013-01-29 11:48:20 -0500625 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
626 ICTL_TPROT;
627
Dominik Dingelb31605c2014-03-25 13:47:11 +0100628 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
629 rc = kvm_s390_vcpu_setup_cmma(vcpu);
630 if (rc)
631 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200632 }
Christian Borntraegerca872302009-05-12 17:21:49 +0200633 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
Christian Borntraegerca872302009-05-12 17:21:49 +0200634 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Christian Borntraeger453423d2008-03-25 18:47:29 +0100635 get_cpu_id(&vcpu->arch.cpu_id);
Christian Borntraeger92e6ecf2009-03-26 15:23:58 +0100636 vcpu->arch.cpu_id.version = 0xff;
Dominik Dingelb31605c2014-03-25 13:47:11 +0100637 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100638}
639
640struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
641 unsigned int id)
642{
Carsten Otte4d475552011-10-18 12:27:12 +0200643 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200644 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200645 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100646
Carsten Otte4d475552011-10-18 12:27:12 +0200647 if (id >= KVM_MAX_VCPUS)
648 goto out;
649
650 rc = -ENOMEM;
651
Michael Muellerb110fea2013-06-12 13:54:54 +0200652 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100653 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200654 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100655
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200656 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
657 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100658 goto out_free_cpu;
659
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200660 vcpu->arch.sie_block = &sie_page->sie_block;
661 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
662
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100663 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100664 if (!kvm_is_ucontrol(kvm)) {
665 if (!kvm->arch.sca) {
666 WARN_ON_ONCE(1);
667 goto out_free_cpu;
668 }
669 if (!kvm->arch.sca->cpu[id].sda)
670 kvm->arch.sca->cpu[id].sda =
671 (__u64) vcpu->arch.sie_block;
672 vcpu->arch.sie_block->scaoh =
673 (__u32)(((__u64)kvm->arch.sca) >> 32);
674 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
675 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
676 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100677
Carsten Otteba5c1e92008-03-25 18:47:26 +0100678 spin_lock_init(&vcpu->arch.local_int.lock);
679 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
680 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200681 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100682 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100683
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100684 rc = kvm_vcpu_init(vcpu, kvm, id);
685 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800686 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100687 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
688 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200689 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100690
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100691 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800692out_free_sie_block:
693 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100694out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200695 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200696out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100697 return ERR_PTR(rc);
698}
699
/*
 * A vcpu is considered runnable by common KVM code when it has a
 * deliverable interrupt pending.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	int pending;

	pending = kvm_cpu_has_interrupt(vcpu);
	return pending;
}
704
/*
 * Block SIE re-entry for this vcpu: sets PROG_BLOCK_SIE in the sie_block
 * prog20 field, which the entry path checks before (re)entering SIE.
 */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
709
/* Allow SIE re-entry again: clears the PROG_BLOCK_SIE bit set by s390_vcpu_block(). */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
714
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	/* request a stop intercept so the hardware leaves SIE */
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	/* busy-wait: prog0c carries PROG_IN_SIE while the cpu is inside SIE */
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
725
/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	/* order matters: block re-entry first, then force the exit */
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
732
/*
 * gmap invalidation notifier: called when a guest mapping at @address is
 * unmapped.  Any vcpu whose prefix area covers the page must reload its
 * prefix mapping before re-entering SIE.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		/* masking bit 0x1000 maps either of the two 4k prefix pages
		 * back to the prefix base for the comparison */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
748
/*
 * s390 never uses the generic kick path (it kicks via SIE intercepts),
 * so reaching this function indicates a programming error.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
755
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register out to userspace.
 * Returns 0 on success, -EINVAL for unknown register ids, or -EFAULT
 * from put_user() on a bad userspace address.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
804
/*
 * KVM_SET_ONE_REG: read a single s390-specific register value from
 * userspace and install it.  Mirrors the register list handled by
 * kvm_arch_vcpu_ioctl_get_one_reg().  Returns 0, -EINVAL for unknown
 * ids, or -EFAULT from get_user().
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		/* NOTE(review): no validation of the token value here —
		 * presumably userspace is trusted to write a sane token */
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500853
/* KVM_S390_INITIAL_RESET ioctl: delegate the full reset to the helper. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
859
/* Install the 16 guest general purpose registers from userspace. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
865
/* Copy the 16 guest general purpose registers out to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
871
/*
 * Install access and control registers.  Access registers are lazily
 * kept in the host registers while the vcpu is loaded, so they must be
 * reloaded into hardware after updating the in-memory copy.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
880
/* Copy access and control registers out to userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
888
/*
 * Install the guest floating point registers and FP control word.
 * The fpc is validated *before* any guest state is touched, so a bad
 * value leaves the vcpu unmodified.  The new state is also loaded into
 * the hardware registers, matching the lazy load/put scheme.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
899
/* Copy the guest floating point registers and FP control out to userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
906
/*
 * Set the initial PSW for a vcpu.  Only permitted while the vcpu is
 * stopped; a running vcpu yields -EBUSY.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
919
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
925
/* debug control flags accepted by KVM_SET_GUEST_DEBUG on s390 */
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

/*
 * Configure guest debugging.  Any previous debug state is dropped
 * first; on enable, guest PER is forced on and optional hardware
 * breakpoints are imported.  On any failure all debug state is torn
 * down again so the vcpu is left with debugging fully disabled.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		/* roll back: leave the vcpu with debugging disabled */
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
961
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -0300962int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
963 struct kvm_mp_state *mp_state)
964{
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200965 /* CHECK_STOP and LOAD are not supported yet */
966 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
967 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -0300968}
969
/*
 * Set the vcpu's multiprocessing state from userspace.  Using this
 * interface at all marks the VM as userspace-controlled (the kernel
 * then stops auto-starting vcpus in kvm_arch_vcpu_ioctl_run()).
 * Returns -ENXIO for states that are not supported.
 */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
994
Dominik Dingelb31605c2014-03-25 13:47:11 +0100995bool kvm_s390_cmma_enabled(struct kvm *kvm)
996{
997 if (!MACHINE_IS_LPAR)
998 return false;
999 /* only enable for z10 and later */
1000 if (!MACHINE_HAS_EDAT1)
1001 return false;
1002 if (!kvm->arch.use_cmma)
1003 return false;
1004 return true;
1005}
1006
David Hildenbrand8ad35752014-03-14 11:00:21 +01001007static bool ibs_enabled(struct kvm_vcpu *vcpu)
1008{
1009 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1010}
1011
/*
 * Process pending vcpu requests before entering SIE.  Each handled
 * request restarts the loop (goto retry) because handling one request
 * may race with a new one being posted; the loop only falls through
 * once no request is pending.  Returns 0 or a negative error.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/* 0xffff invalidates the cached host cpu so SIE flushes */
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
1061
Thomas Huthfa576c52014-05-06 17:20:16 +02001062/**
1063 * kvm_arch_fault_in_page - fault-in guest page if necessary
1064 * @vcpu: The corresponding virtual cpu
1065 * @gpa: Guest physical address
1066 * @writable: Whether the page should be writable or not
1067 *
1068 * Make sure that a guest page has been faulted-in on the host.
1069 *
1070 * Return: Zero on success, negative error code otherwise.
1071 */
1072long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001073{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02001074 return gmap_fault(vcpu->arch.gmap, gpa,
1075 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02001076}
1077
/*
 * Inject a pseudo-page-fault notification carrying @token.  INIT
 * notifications target the vcpu, DONE notifications target the VM.
 * Only .type and .parm64 of the interrupt struct are initialized;
 * the injection paths for these types read no other fields.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1092
/* async_pf callback: tell the guest that the page is not yet available. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1099
/* async_pf callback: tell the guest that the faulted page is now present. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1106
/* async_pf callback: nothing to do here on s390. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1112
/* async_pf callback: always report "can inject" so cleanup runs. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1121
/*
 * Try to arm an asynchronous page fault for the current host fault.
 * A series of guard conditions re-checks that pfault is configured and
 * currently deliverable (valid token, PSW mask matches the configured
 * select/compare, ext interrupts enabled, no interrupt already pending,
 * CR0 pfault subclass enabled, pfault enabled on the gmap).
 *
 * Returns nonzero when an async pf was set up, 0 when the caller must
 * fall back to a synchronous fault-in.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	/* the token must be readable from guest real space, else bail out */
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1150
/*
 * Per-iteration preparation before entering SIE: async-pf bookkeeping,
 * sync of gprs 14/15 into the sie block, rescheduling, pending machine
 * checks, interrupt delivery, vcpu requests and guest-debug patching.
 * Returns 0 when SIE may be entered, negative error otherwise.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 shadow guest gprs 14 and 15 inside the sie block */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1192
/*
 * Post-SIE processing.  @exit_reason >= 0 means a regular SIE exit;
 * negative means the host faulted while running the guest.  rc keeps
 * the sentinel -1 ("not yet handled") until one of the branches claims
 * the exit; an unclaimed host fault is reported to the guest as an
 * addressing exception.  Returns 0, a handler result, or -EREMOTE /
 * -EOPNOTSUPP to bounce the exit to userspace.
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		/* ucontrol VMs let userspace resolve the translation fault */
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		/* try async pfault first, else fault the page in now */
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	/* write gg14/gg15 shadows back to guest gprs 14 and 15 */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1242
/*
 * Main vcpu execution loop: pre-run, SIE entry, post-run, repeated
 * until an error, a pending signal or a pending guest-debug exit.
 * The kvm->srcu lock is held everywhere except while actually inside
 * SIE, protecting memslot accesses.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1277
/*
 * Copy register state userspace marked dirty in kvm_run into the vcpu
 * before entering the guest; clears kvm_dirty_regs when done.
 */
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}
1303
/* Copy current vcpu register state back into kvm_run for userspace. */
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
1319
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001320int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1321{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001322 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001323 sigset_t sigsaved;
1324
David Hildenbrand27291e22014-01-23 12:26:52 +01001325 if (guestdbg_exit_pending(vcpu)) {
1326 kvm_s390_prepare_debug_exit(vcpu);
1327 return 0;
1328 }
1329
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001330 if (vcpu->sigset_active)
1331 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1332
David Hildenbrand6352e4d2014-04-10 17:35:00 +02001333 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1334 kvm_s390_vcpu_start(vcpu);
1335 } else if (is_vcpu_stopped(vcpu)) {
1336 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1337 vcpu->vcpu_id);
1338 return -EINVAL;
1339 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001340
David Hildenbrandb028ee32014-07-17 10:47:43 +02001341 sync_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001342
Heiko Carstensdab4079d2009-06-12 10:26:32 +02001343 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02001344 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02001345
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001346 if (signal_pending(current) && !rc) {
1347 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001348 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02001349 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001350
David Hildenbrand27291e22014-01-23 12:26:52 +01001351 if (guestdbg_exit_pending(vcpu) && !rc) {
1352 kvm_s390_prepare_debug_exit(vcpu);
1353 rc = 0;
1354 }
1355
Heiko Carstensb8e660b2010-02-26 22:37:41 +01001356 if (rc == -EOPNOTSUPP) {
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001357 /* intercept cannot be handled in-kernel, prepare kvm-run */
1358 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1359 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01001360 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1361 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1362 rc = 0;
1363 }
1364
1365 if (rc == -EREMOTE) {
1366 /* intercept was handled, but userspace support is needed
1367 * kvm_run has been prepared by the handler */
1368 rc = 0;
1369 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001370
David Hildenbrandb028ee32014-07-17 10:47:43 +02001371 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01001372
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001373 if (vcpu->sigset_active)
1374 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1375
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001376 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02001377 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001378}
1379
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		/* byte 163 is the architected-mode id */
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	/* errors are OR-ed; any failure maps to a single -EFAULT below */
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the architected clock comparator save format drops the low byte */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1427
/*
 * Store status for a loaded vcpu: refresh the in-memory copies of the
 * lazily-held FP and access registers first, then store them.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1441
/*
 * Request IBS off for one vcpu: cancel any pending ENABLE request,
 * post DISABLE and kick the vcpu out of SIE so it is processed.
 */
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1448
1449static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1450{
1451 unsigned int i;
1452 struct kvm_vcpu *vcpu;
1453
1454 kvm_for_each_vcpu(i, vcpu, kvm) {
1455 __disable_ibs_on_vcpu(vcpu);
1456 }
1457}
1458
/*
 * Request IBS on for one vcpu: cancel any pending DISABLE request,
 * post ENABLE and kick the vcpu out of SIE so it is processed.
 */
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
1465
/*
 * Move a vcpu from the STOPPED to the OPERATING state.  Serialized by
 * arch.start_stop_lock so only one cpu at a time changes state.  IBS
 * (interruption-blocking state, a single-cpu speedup) is enabled when
 * this becomes the only running vcpu and disabled everywhere when a
 * second vcpu starts.
 */
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
1504
1505void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1506{
David Hildenbrand8ad35752014-03-14 11:00:21 +01001507 int i, online_vcpus, started_vcpus = 0;
1508 struct kvm_vcpu *started_vcpu = NULL;
1509
1510 if (is_vcpu_stopped(vcpu))
1511 return;
1512
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001513 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001514 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001515 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001516 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1517
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001518 /* Need to lock access to action_bits to avoid a SIGP race condition */
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001519 spin_lock(&vcpu->arch.local_int.lock);
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001520 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001521
1522 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
1523 vcpu->arch.local_int.action_bits &=
1524 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
David Hildenbrand4ae3c082014-05-16 10:23:53 +02001525 spin_unlock(&vcpu->arch.local_int.lock);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02001526
David Hildenbrand8ad35752014-03-14 11:00:21 +01001527 __disable_ibs_on_vcpu(vcpu);
1528
1529 for (i = 0; i < online_vcpus; i++) {
1530 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1531 started_vcpus++;
1532 started_vcpu = vcpu->kvm->vcpus[i];
1533 }
1534 }
1535
1536 if (started_vcpus == 1) {
1537 /*
1538 * As we only have one VCPU left, we want to enable the
1539 * IBS facility for that VCPU to speed it up.
1540 */
1541 __enable_ibs_on_vcpu(started_vcpu);
1542 }
1543
David Hildenbrand433b9ee2014-05-06 16:11:14 +02001544 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01001545 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01001546}
1547
Cornelia Huckd6712df2012-12-20 15:32:11 +01001548static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1549 struct kvm_enable_cap *cap)
1550{
1551 int r;
1552
1553 if (cap->flags)
1554 return -EINVAL;
1555
1556 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01001557 case KVM_CAP_S390_CSS_SUPPORT:
1558 if (!vcpu->kvm->arch.css_support) {
1559 vcpu->kvm->arch.css_support = 1;
1560 trace_kvm_s390_enable_css(vcpu->kvm);
1561 }
1562 r = 0;
1563 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01001564 default:
1565 r = -EINVAL;
1566 break;
1567 }
1568 return r;
1569}
1570
/*
 * Dispatcher for the s390-specific vcpu ioctls.
 *
 * Returns 0 or a positive value on success, a negative error code on
 * failure (-EFAULT for bad user buffers, -EINVAL for bad arguments,
 * -ENOTTY for unknown ioctls).
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		/* inject an interrupt described by user space into this vcpu */
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* guest memory may be touched -> hold the srcu read lock */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		/* both directions share the copy-in of the reg descriptor */
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		/* user-controlled VMs: map a user address range into the gmap */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		/* user-controlled VMs: remove a range from the gmap again */
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address fault on behalf of user space */
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1672
Carsten Otte5b1c1492012-01-04 10:25:23 +01001673int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1674{
1675#ifdef CONFIG_KVM_S390_UCONTROL
1676 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1677 && (kvm_is_ucontrol(vcpu->kvm))) {
1678 vmf->page = virt_to_page(vcpu->arch.sie_block);
1679 get_page(vmf->page);
1680 return 0;
1681 }
1682#endif
1683 return VM_FAULT_SIGBUS;
1684}
1685
/* No arch-specific per-memslot data is needed on s390 -> nothing to do. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1691
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001692/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001693int kvm_arch_prepare_memory_region(struct kvm *kvm,
1694 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001695 struct kvm_userspace_memory_region *mem,
1696 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001697{
Nick Wangdd2887e2013-03-25 17:22:57 +01001698 /* A few sanity checks. We can have memory slots which have to be
1699 located/ended at a segment boundary (1MB). The memory in userland is
1700 ok to be fragmented into various different vmas. It is okay to mmap()
1701 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001702
Carsten Otte598841c2011-07-24 10:48:21 +02001703 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001704 return -EINVAL;
1705
Carsten Otte598841c2011-07-24 10:48:21 +02001706 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001707 return -EINVAL;
1708
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001709 return 0;
1710}
1711
1712void kvm_arch_commit_memory_region(struct kvm *kvm,
1713 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001714 const struct kvm_memory_slot *old,
1715 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001716{
Carsten Ottef7850c92011-07-24 10:48:23 +02001717 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001718
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01001719 /* If the basics of the memslot do not change, we do not want
1720 * to update the gmap. Every update causes several unnecessary
1721 * segment translation exceptions. This is usually handled just
1722 * fine by the normal fault handler + gmap, but it will also
1723 * cause faults on the prefix page of running guest CPUs.
1724 */
1725 if (old->userspace_addr == mem->userspace_addr &&
1726 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1727 old->npages * PAGE_SIZE == mem->memory_size)
1728 return;
Carsten Otte598841c2011-07-24 10:48:21 +02001729
1730 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1731 mem->guest_phys_addr, mem->memory_size);
1732 if (rc)
Carsten Ottef7850c92011-07-24 10:48:23 +02001733 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02001734 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001735}
1736
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001737static int __init kvm_s390_init(void)
1738{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001739 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03001740 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001741 if (ret)
1742 return ret;
1743
1744 /*
1745 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001746 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001747 * only set facilities that are known to work in KVM.
1748 */
Michael Mueller78c4b592013-07-26 15:04:04 +02001749 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1750 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001751 kvm_exit();
1752 return -ENOMEM;
1753 }
Michael Mueller78c4b592013-07-26 15:04:04 +02001754 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Thomas Huthd208c792013-12-12 13:40:40 +01001755 vfacilities[0] &= 0xff82fff3f4fc2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001756 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001757 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001758}
1759
/* Module exit: release the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1765
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");