blob: 7ae8c26065fbe24ceb85d768367282c8ef68b9c7 [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02002 * hosting zSeries kernel virtual machines
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2008, 2009
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020013 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040014 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010015 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
25#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010026#include <linux/timer.h>
Heiko Carstenscbb870c2010-02-26 22:37:43 +010027#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <asm/lowcore.h>
29#include <asm/pgtable.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010030#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010031#include <asm/switch_to.h>
Michael Mueller78c4b59f2013-07-26 15:04:04 +020032#include <asm/facility.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020033#include <asm/sclp.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010034#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010035#include "gaccess.h"
36
Cornelia Huck5786fff2012-07-23 17:20:29 +020037#define CREATE_TRACE_POINTS
38#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020039#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020040
Heiko Carstensb0c632d2008-03-25 18:47:20 +010041#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
42
43struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020045 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010046 { "exit_validity", VCPU_STAT(exit_validity) },
47 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48 { "exit_external_request", VCPU_STAT(exit_external_request) },
49 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010050 { "exit_instruction", VCPU_STAT(exit_instruction) },
51 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020053 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010054 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010055 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
56 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010057 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020058 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010059 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
60 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
61 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
62 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
63 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
64 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
65 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +020066 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010067 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
68 { "instruction_spx", VCPU_STAT(instruction_spx) },
69 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
70 { "instruction_stap", VCPU_STAT(instruction_stap) },
71 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
Heiko Carstens8a2422342014-01-10 14:33:28 +010072 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010073 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
74 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +020075 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +010076 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
77 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +020078 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010079 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +010080 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020081 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +010082 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
83 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
84 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
85 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
86 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
Christian Borntraeger388186b2011-10-30 15:17:03 +010087 { "diagnose_10", VCPU_STAT(diagnose_10) },
Christian Borntraegere28acfe2008-03-25 18:47:34 +010088 { "diagnose_44", VCPU_STAT(diagnose_44) },
Konstantin Weitz41628d32012-04-25 15:30:38 +020089 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +010090 { NULL }
91};
92
Michael Mueller78c4b59f2013-07-26 15:04:04 +020093unsigned long *vfacilities;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +020094static struct gmap_notifier gmap_notifier;
Heiko Carstensb0c632d2008-03-25 18:47:20 +010095
Michael Mueller78c4b59f2013-07-26 15:04:04 +020096/* test availability of vfacility */
Heiko Carstens280ef0f2013-12-17 09:08:28 +010097int test_vfacility(unsigned long nr)
Michael Mueller78c4b59f2013-07-26 15:04:04 +020098{
99 return __test_facility(nr, (void *) vfacilities);
100}
101
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100102/* Section: not file related */
Alexander Graf10474ae2009-09-15 11:37:46 +0200103int kvm_arch_hardware_enable(void *garbage)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100104{
105 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200106 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100107}
108
109void kvm_arch_hardware_disable(void *garbage)
110{
111}
112
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200113static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
114
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100115int kvm_arch_hardware_setup(void)
116{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200117 gmap_notifier.notifier_call = kvm_gmap_notifier;
118 gmap_register_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100119 return 0;
120}
121
122void kvm_arch_hardware_unsetup(void)
123{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200124 gmap_unregister_ipte_notifier(&gmap_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100125}
126
127void kvm_arch_check_processor_compat(void *rtn)
128{
129}
130
131int kvm_arch_init(void *opaque)
132{
133 return 0;
134}
135
136void kvm_arch_exit(void)
137{
138}
139
140/* Section: device related */
141long kvm_arch_dev_ioctl(struct file *filp,
142 unsigned int ioctl, unsigned long arg)
143{
144 if (ioctl == KVM_S390_ENABLE_SIE)
145 return s390_enable_sie();
146 return -EINVAL;
147}
148
149int kvm_dev_ioctl_check_extension(long ext)
150{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100151 int r;
152
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200153 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100154 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200155 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100156 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100157#ifdef CONFIG_KVM_S390_UCONTROL
158 case KVM_CAP_S390_UCONTROL:
159#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200160 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100161 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200162 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100163 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100164 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100165 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200166 case KVM_CAP_DEVICE_CTRL:
Cornelia Huckd938dc52013-10-23 18:26:34 +0200167 case KVM_CAP_ENABLE_CAP_VM:
Dominik Dingelf2061652014-04-09 13:13:00 +0200168 case KVM_CAP_VM_ATTRIBUTES:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100169 r = 1;
170 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200171 case KVM_CAP_NR_VCPUS:
172 case KVM_CAP_MAX_VCPUS:
173 r = KVM_MAX_VCPUS;
174 break;
Nick Wange1e2e602013-03-25 17:22:58 +0100175 case KVM_CAP_NR_MEMSLOTS:
176 r = KVM_USER_MEM_SLOTS;
177 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200178 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100179 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200180 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200181 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100182 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200183 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100184 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100185}
186
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400187static void kvm_s390_sync_dirty_log(struct kvm *kvm,
188 struct kvm_memory_slot *memslot)
189{
190 gfn_t cur_gfn, last_gfn;
191 unsigned long address;
192 struct gmap *gmap = kvm->arch.gmap;
193
194 down_read(&gmap->mm->mmap_sem);
195 /* Loop over all guest pages */
196 last_gfn = memslot->base_gfn + memslot->npages;
197 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
198 address = gfn_to_hva_memslot(memslot, cur_gfn);
199
200 if (gmap_test_and_clear_dirty(address, gmap))
201 mark_page_dirty(kvm, cur_gfn);
202 }
203 up_read(&gmap->mm->mmap_sem);
204}
205
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100206/* Section: vm related */
207/*
208 * Get (and clear) the dirty memory log for a memory slot.
209 */
210int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
211 struct kvm_dirty_log *log)
212{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400213 int r;
214 unsigned long n;
215 struct kvm_memory_slot *memslot;
216 int is_dirty = 0;
217
218 mutex_lock(&kvm->slots_lock);
219
220 r = -EINVAL;
221 if (log->slot >= KVM_USER_MEM_SLOTS)
222 goto out;
223
224 memslot = id_to_memslot(kvm->memslots, log->slot);
225 r = -ENOENT;
226 if (!memslot->dirty_bitmap)
227 goto out;
228
229 kvm_s390_sync_dirty_log(kvm, memslot);
230 r = kvm_get_dirty_log(kvm, log, &is_dirty);
231 if (r)
232 goto out;
233
234 /* Clear the dirty log */
235 if (is_dirty) {
236 n = kvm_dirty_bitmap_bytes(memslot);
237 memset(memslot->dirty_bitmap, 0, n);
238 }
239 r = 0;
240out:
241 mutex_unlock(&kvm->slots_lock);
242 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100243}
244
Cornelia Huckd938dc52013-10-23 18:26:34 +0200245static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
246{
247 int r;
248
249 if (cap->flags)
250 return -EINVAL;
251
252 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200253 case KVM_CAP_S390_IRQCHIP:
254 kvm->arch.use_irqchip = 1;
255 r = 0;
256 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200257 default:
258 r = -EINVAL;
259 break;
260 }
261 return r;
262}
263
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200264static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
265{
266 int ret;
267 unsigned int idx;
268 switch (attr->attr) {
269 case KVM_S390_VM_MEM_ENABLE_CMMA:
270 ret = -EBUSY;
271 mutex_lock(&kvm->lock);
272 if (atomic_read(&kvm->online_vcpus) == 0) {
273 kvm->arch.use_cmma = 1;
274 ret = 0;
275 }
276 mutex_unlock(&kvm->lock);
277 break;
278 case KVM_S390_VM_MEM_CLR_CMMA:
279 mutex_lock(&kvm->lock);
280 idx = srcu_read_lock(&kvm->srcu);
281 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
282 srcu_read_unlock(&kvm->srcu, idx);
283 mutex_unlock(&kvm->lock);
284 ret = 0;
285 break;
286 default:
287 ret = -ENXIO;
288 break;
289 }
290 return ret;
291}
292
Dominik Dingelf2061652014-04-09 13:13:00 +0200293static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
294{
295 int ret;
296
297 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200298 case KVM_S390_VM_MEM_CTRL:
299 ret = kvm_s390_mem_control(kvm, attr);
300 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200301 default:
302 ret = -ENXIO;
303 break;
304 }
305
306 return ret;
307}
308
309static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
310{
311 return -ENXIO;
312}
313
314static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
315{
316 int ret;
317
318 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200319 case KVM_S390_VM_MEM_CTRL:
320 switch (attr->attr) {
321 case KVM_S390_VM_MEM_ENABLE_CMMA:
322 case KVM_S390_VM_MEM_CLR_CMMA:
323 ret = 0;
324 break;
325 default:
326 ret = -ENXIO;
327 break;
328 }
329 break;
Dominik Dingelf2061652014-04-09 13:13:00 +0200330 default:
331 ret = -ENXIO;
332 break;
333 }
334
335 return ret;
336}
337
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100338long kvm_arch_vm_ioctl(struct file *filp,
339 unsigned int ioctl, unsigned long arg)
340{
341 struct kvm *kvm = filp->private_data;
342 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +0200343 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100344 int r;
345
346 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100347 case KVM_S390_INTERRUPT: {
348 struct kvm_s390_interrupt s390int;
349
350 r = -EFAULT;
351 if (copy_from_user(&s390int, argp, sizeof(s390int)))
352 break;
353 r = kvm_s390_inject_vm(kvm, &s390int);
354 break;
355 }
Cornelia Huckd938dc52013-10-23 18:26:34 +0200356 case KVM_ENABLE_CAP: {
357 struct kvm_enable_cap cap;
358 r = -EFAULT;
359 if (copy_from_user(&cap, argp, sizeof(cap)))
360 break;
361 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
362 break;
363 }
Cornelia Huck84223592013-07-15 13:36:01 +0200364 case KVM_CREATE_IRQCHIP: {
365 struct kvm_irq_routing_entry routing;
366
367 r = -EINVAL;
368 if (kvm->arch.use_irqchip) {
369 /* Set up dummy routing. */
370 memset(&routing, 0, sizeof(routing));
371 kvm_set_irq_routing(kvm, &routing, 0, 0);
372 r = 0;
373 }
374 break;
375 }
Dominik Dingelf2061652014-04-09 13:13:00 +0200376 case KVM_SET_DEVICE_ATTR: {
377 r = -EFAULT;
378 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
379 break;
380 r = kvm_s390_vm_set_attr(kvm, &attr);
381 break;
382 }
383 case KVM_GET_DEVICE_ATTR: {
384 r = -EFAULT;
385 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
386 break;
387 r = kvm_s390_vm_get_attr(kvm, &attr);
388 break;
389 }
390 case KVM_HAS_DEVICE_ATTR: {
391 r = -EFAULT;
392 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
393 break;
394 r = kvm_s390_vm_has_attr(kvm, &attr);
395 break;
396 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100397 default:
Avi Kivity367e1312009-08-26 14:57:07 +0300398 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100399 }
400
401 return r;
402}
403
Carsten Ottee08b9632012-01-04 10:25:20 +0100404int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100405{
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100406 int rc;
407 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100408 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100409
Carsten Ottee08b9632012-01-04 10:25:20 +0100410 rc = -EINVAL;
411#ifdef CONFIG_KVM_S390_UCONTROL
412 if (type & ~KVM_VM_S390_UCONTROL)
413 goto out_err;
414 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
415 goto out_err;
416#else
417 if (type)
418 goto out_err;
419#endif
420
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100421 rc = s390_enable_sie();
422 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100423 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100424
Carsten Otteb2904112011-10-18 12:27:13 +0200425 rc = -ENOMEM;
426
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100427 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
428 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100429 goto out_err;
Christian Borntraegerf6c137f2014-03-19 11:18:29 +0100430 spin_lock(&kvm_lock);
431 sca_offset = (sca_offset + 16) & 0x7f0;
432 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
433 spin_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100434
435 sprintf(debug_name, "kvm-%u", current->pid);
436
437 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
438 if (!kvm->arch.dbf)
439 goto out_nodbf;
440
Carsten Otteba5c1e92008-03-25 18:47:26 +0100441 spin_lock_init(&kvm->arch.float_int.lock);
442 INIT_LIST_HEAD(&kvm->arch.float_int.list);
Heiko Carstens8a2422342014-01-10 14:33:28 +0100443 init_waitqueue_head(&kvm->arch.ipte_wq);
Carsten Otteba5c1e92008-03-25 18:47:26 +0100444
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100445 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
446 VM_EVENT(kvm, 3, "%s", "vm created");
447
Carsten Ottee08b9632012-01-04 10:25:20 +0100448 if (type & KVM_VM_S390_UCONTROL) {
449 kvm->arch.gmap = NULL;
450 } else {
451 kvm->arch.gmap = gmap_alloc(current->mm);
452 if (!kvm->arch.gmap)
453 goto out_nogmap;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200454 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +0200455 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +0100456 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100457
458 kvm->arch.css_support = 0;
Cornelia Huck84223592013-07-15 13:36:01 +0200459 kvm->arch.use_irqchip = 0;
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100460
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100461 return 0;
Carsten Otte598841c2011-07-24 10:48:21 +0200462out_nogmap:
463 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100464out_nodbf:
465 free_page((unsigned long)(kvm->arch.sca));
Jan Kiszkad89f5ef2010-11-09 17:02:49 +0100466out_err:
467 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100468}
469
Christian Borntraegerd329c032008-11-26 14:50:27 +0100470void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
471{
472 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +0200473 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Dominik Dingel3c038e62013-10-07 17:11:48 +0200474 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte58f94602012-01-04 10:25:27 +0100475 if (!kvm_is_ucontrol(vcpu->kvm)) {
476 clear_bit(63 - vcpu->vcpu_id,
477 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
478 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
479 (__u64) vcpu->arch.sie_block)
480 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
481 }
Carsten Otteabf4a712009-05-12 17:21:51 +0200482 smp_mb();
Carsten Otte27e03932012-01-04 10:25:21 +0100483
484 if (kvm_is_ucontrol(vcpu->kvm))
485 gmap_free(vcpu->arch.gmap);
486
Dominik Dingelb31605c2014-03-25 13:47:11 +0100487 if (kvm_s390_cmma_enabled(vcpu->kvm))
488 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100489 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200490
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100491 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +0200492 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100493}
494
495static void kvm_free_vcpus(struct kvm *kvm)
496{
497 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300498 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +0100499
Gleb Natapov988a2ca2009-06-09 15:56:29 +0300500 kvm_for_each_vcpu(i, vcpu, kvm)
501 kvm_arch_vcpu_destroy(vcpu);
502
503 mutex_lock(&kvm->lock);
504 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
505 kvm->vcpus[i] = NULL;
506
507 atomic_set(&kvm->online_vcpus, 0);
508 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100509}
510
Sheng Yangad8ba2c2009-01-06 10:03:02 +0800511void kvm_arch_sync_events(struct kvm *kvm)
512{
513}
514
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100515void kvm_arch_destroy_vm(struct kvm *kvm)
516{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100517 kvm_free_vcpus(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100518 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100519 debug_unregister(kvm->arch.dbf);
Carsten Otte27e03932012-01-04 10:25:21 +0100520 if (!kvm_is_ucontrol(kvm))
521 gmap_free(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +0200522 kvm_s390_destroy_adapters(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100523}
524
525/* Section: vcpu related */
526int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
527{
Dominik Dingel3c038e62013-10-07 17:11:48 +0200528 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
529 kvm_clear_async_pf_completion_queue(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +0100530 if (kvm_is_ucontrol(vcpu->kvm)) {
531 vcpu->arch.gmap = gmap_alloc(current->mm);
532 if (!vcpu->arch.gmap)
533 return -ENOMEM;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200534 vcpu->arch.gmap->private = vcpu->kvm;
Carsten Otte27e03932012-01-04 10:25:21 +0100535 return 0;
536 }
537
Carsten Otte598841c2011-07-24 10:48:21 +0200538 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
Christian Borntraeger59674c12012-01-11 11:20:33 +0100539 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
540 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +0100541 KVM_SYNC_ACRS |
542 KVM_SYNC_CRS;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100543 return 0;
544}
545
546void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
547{
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100548 /* Nothing todo */
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100549}
550
551void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
552{
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200553 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
554 save_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100555 save_access_regs(vcpu->arch.host_acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200556 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
557 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100558 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200559 gmap_enable(vcpu->arch.gmap);
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100560 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100561}
562
563void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
564{
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100565 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger480e5922011-09-20 17:07:28 +0200566 gmap_disable(vcpu->arch.gmap);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200567 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
568 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
Christian Borntraeger59674c12012-01-11 11:20:33 +0100569 save_access_regs(vcpu->run->s.regs.acrs);
Martin Schwidefsky4725c862013-10-15 16:08:34 +0200570 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
571 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100572 restore_access_regs(vcpu->arch.host_acrs);
573}
574
575static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
576{
577 /* this equals initial cpu reset in pop, but we don't switch to ESA */
578 vcpu->arch.sie_block->gpsw.mask = 0UL;
579 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +0100580 kvm_s390_set_prefix(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100581 vcpu->arch.sie_block->cputm = 0UL;
582 vcpu->arch.sie_block->ckc = 0UL;
583 vcpu->arch.sie_block->todpr = 0;
584 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
585 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
586 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
587 vcpu->arch.guest_fpregs.fpc = 0;
588 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
589 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +0100590 vcpu->arch.sie_block->pp = 0;
Dominik Dingel3c038e62013-10-07 17:11:48 +0200591 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
592 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger61bde822012-06-11 16:06:57 +0200593 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
Jens Freimann2ed10cc2014-02-11 13:48:07 +0100594 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100595}
596
Marcelo Tosatti42897d82012-11-27 23:29:02 -0200597int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
598{
599 return 0;
600}
601
Dominik Dingelb31605c2014-03-25 13:47:11 +0100602void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
603{
604 free_page(vcpu->arch.sie_block->cbrlo);
605 vcpu->arch.sie_block->cbrlo = 0;
606}
607
608int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
609{
610 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
611 if (!vcpu->arch.sie_block->cbrlo)
612 return -ENOMEM;
613
614 vcpu->arch.sie_block->ecb2 |= 0x80;
615 vcpu->arch.sie_block->ecb2 &= ~0x08;
616 return 0;
617}
618
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100619int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
620{
Dominik Dingelb31605c2014-03-25 13:47:11 +0100621 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200622
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100623 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
624 CPUSTAT_SM |
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200625 CPUSTAT_STOPPED |
626 CPUSTAT_GED);
Christian Borntraegerfc345312010-06-17 23:16:20 +0200627 vcpu->arch.sie_block->ecb = 6;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200628 if (test_vfacility(50) && test_vfacility(73))
629 vcpu->arch.sie_block->ecb |= 0x10;
630
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200631 vcpu->arch.sie_block->ecb2 = 8;
Heiko Carstens217a4402013-12-30 12:54:14 +0100632 vcpu->arch.sie_block->eca = 0xC1002000U;
633 if (sclp_has_siif())
634 vcpu->arch.sie_block->eca |= 1;
Michael Mueller78c4b59f2013-07-26 15:04:04 +0200635 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
Dominik Dingel693ffc02014-01-14 18:11:14 +0100636 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Dominik Dingelb31605c2014-03-25 13:47:11 +0100637 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
638 rc = kvm_s390_vcpu_setup_cmma(vcpu);
639 if (rc)
640 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200641 }
Christian Borntraegerca872302009-05-12 17:21:49 +0200642 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
643 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
644 (unsigned long) vcpu);
645 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Christian Borntraeger453423d2008-03-25 18:47:29 +0100646 get_cpu_id(&vcpu->arch.cpu_id);
Christian Borntraeger92e6ecf2009-03-26 15:23:58 +0100647 vcpu->arch.cpu_id.version = 0xff;
Dominik Dingelb31605c2014-03-25 13:47:11 +0100648 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100649}
650
651struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
652 unsigned int id)
653{
Carsten Otte4d475552011-10-18 12:27:12 +0200654 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200655 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +0200656 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100657
Carsten Otte4d475552011-10-18 12:27:12 +0200658 if (id >= KVM_MAX_VCPUS)
659 goto out;
660
661 rc = -ENOMEM;
662
Michael Muellerb110fea2013-06-12 13:54:54 +0200663 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100664 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +0200665 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100666
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200667 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
668 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100669 goto out_free_cpu;
670
Michael Mueller7feb6bb2013-06-28 13:30:24 +0200671 vcpu->arch.sie_block = &sie_page->sie_block;
672 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
673
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100674 vcpu->arch.sie_block->icpua = id;
Carsten Otte58f94602012-01-04 10:25:27 +0100675 if (!kvm_is_ucontrol(kvm)) {
676 if (!kvm->arch.sca) {
677 WARN_ON_ONCE(1);
678 goto out_free_cpu;
679 }
680 if (!kvm->arch.sca->cpu[id].sda)
681 kvm->arch.sca->cpu[id].sda =
682 (__u64) vcpu->arch.sie_block;
683 vcpu->arch.sie_block->scaoh =
684 (__u32)(((__u64)kvm->arch.sca) >> 32);
685 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
686 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
687 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100688
Carsten Otteba5c1e92008-03-25 18:47:26 +0100689 spin_lock_init(&vcpu->arch.local_int.lock);
690 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
691 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +0200692 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100693 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
Carsten Otteba5c1e92008-03-25 18:47:26 +0100694
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100695 rc = kvm_vcpu_init(vcpu, kvm, id);
696 if (rc)
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800697 goto out_free_sie_block;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100698 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
699 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +0200700 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100701
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100702 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +0800703out_free_sie_block:
704 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100705out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +0200706 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +0200707out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100708 return ERR_PTR(rc);
709}
710
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100711int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
712{
Michael Muellerf87618e2014-02-26 16:14:17 +0100713 return kvm_cpu_has_interrupt(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100714}
715
/*
 * Prevent this vcpu from (re-)entering SIE by setting PROG_BLOCK_SIE in
 * the SIE control block's prog20 field. Paired with s390_vcpu_unblock().
 */
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
720
/*
 * Allow this vcpu to enter SIE again by clearing PROG_BLOCK_SIE.
 * Counterpart of s390_vcpu_block().
 */
void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
725
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 *
 * Implementation: raise a stop intercept request (CPUSTAT_STOP_INT)
 * and busy-wait until the PROG_IN_SIE bit in prog0c is no longer set,
 * i.e. the cpu has actually left SIE context.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
736
/*
 * Kick a guest cpu out of SIE and prevent SIE-reentry.
 * Blocking must be set *before* kicking, so the vcpu cannot slip back
 * into SIE between the kick and the caller's follow-up work.
 */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
743
/*
 * gmap invalidation notifier: called when a guest mapping at @address is
 * torn down. If the address covers a vcpu's prefix area, force that vcpu
 * out of SIE and request a MMU reload so the prefix mapping is
 * re-established before the vcpu runs again.
 */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages (the prefix area spans 2 pages) */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
759
/*
 * Mandatory arch hook for common KVM code; s390 kicks vcpus via
 * exit_sie()/exit_sie_sync() instead, so this must never be reached.
 */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
766
/*
 * KVM_GET_ONE_REG: copy a single s390-specific register (from the SIE
 * block or the pfault state in vcpu->arch) to the user buffer at
 * reg->addr. Returns -EINVAL for unknown register ids, otherwise the
 * put_user() result (0 or -EFAULT).
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
815
/*
 * KVM_SET_ONE_REG: mirror image of the get_one_reg handler above —
 * read a single register value from the user buffer at reg->addr and
 * store it into the SIE block / pfault state. Returns -EINVAL for
 * unknown register ids, otherwise the get_user() result (0 or -EFAULT).
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
Christoffer Dallb6d33832012-03-08 16:44:24 -0500864
/* KVM_S390_INITIAL_RESET: perform the architected initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
870
/* Copy the 16 general purpose registers from userspace into the run struct. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
876
/* Copy the 16 general purpose registers from the run struct to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
882
/*
 * Set the special registers: access registers (kept in the run struct,
 * lazily synced) and control registers (kept in the SIE block). The
 * access registers are loaded into the hardware immediately since the
 * guest acrs live in the host registers while the vcpu is loaded.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
891
/* Read back access registers and control registers for userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
899
/*
 * Set the guest floating point registers and fp control register.
 * The fpc is validated first (test_fp_ctl) so we never load an invalid
 * value into the hardware; the new state is restored into the real fp
 * registers immediately because the guest fp state is kept live while
 * the vcpu is loaded.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}
910
/* Read back the guest floating point registers and fp control register. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
917
918static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
919{
920 int rc = 0;
921
Cornelia Huck9e6dabe2011-11-17 11:00:41 +0100922 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100923 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100924 else {
925 vcpu->run->psw_mask = psw.mask;
926 vcpu->run->psw_addr = psw.addr;
927 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100928 return rc;
929}
930
/* KVM_TRANSLATE is not supported on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
936
/* Guest debugging is not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
942
/* KVM_GET_MP_STATE is not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
948
/* KVM_SET_MP_STATE is not supported on s390 (yet). */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
954
Dominik Dingelb31605c2014-03-25 13:47:11 +0100955bool kvm_s390_cmma_enabled(struct kvm *kvm)
956{
957 if (!MACHINE_IS_LPAR)
958 return false;
959 /* only enable for z10 and later */
960 if (!MACHINE_HAS_EDAT1)
961 return false;
962 if (!kvm->arch.use_cmma)
963 return false;
964 return true;
965}
966
/*
 * Process pending vcpu requests before entering SIE.
 *
 * Currently only KVM_REQ_MMU_RELOAD is handled: re-arm the ipte
 * notifier on the two prefix pages and unblock the vcpu. Returns 0 on
 * success or the gmap_ipte_notify() error.
 */
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}
987
/*
 * Synchronously fault in the host page backing the guest address that
 * caused the last gmap fault (current->thread.gmap_addr). Uses
 * get_user_pages() with write access under mmap_sem held for reading.
 * Returns the get_user_pages() result (>= 0 on success).
 */
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}
998
/*
 * Inject a pfault token interrupt carrying @token in parm64.
 *
 * start_token == true injects KVM_S390_INT_PFAULT_INIT on the vcpu
 * (async page fault handling started); otherwise KVM_S390_INT_PFAULT_DONE
 * is injected as a floating interrupt on the VM (fault completed).
 * Injection failures are unexpected and only trigger a one-shot WARN.
 */
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
1013
/* Async-pf hook: a page is not present — inject the PFAULT_INIT token. */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}
1020
/* Async-pf hook: the page became present — inject the PFAULT_DONE token. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}
1027
/* Async-pf hook: nothing to do, the injection happens in page_present. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}
1033
/*
 * Always report "can inject" so common code invokes page_present and
 * the completion bookkeeping runs.
 */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
1042
/*
 * Try to set up an async page fault for the faulting guest address in
 * current->thread.gmap_addr.
 *
 * Returns 0 (and does nothing) unless all preconditions hold: a valid
 * pfault token is configured, the PSW mask matches the pfault
 * compare/select values, external interrupts are enabled, no interrupt
 * is already pending, the pfault subclass (cr0 bit 0x200) is enabled,
 * and pfault is enabled for the gmap. Reading the 8-byte token from
 * guest real storage must also succeed. Otherwise returns the
 * kvm_setup_async_pf() result.
 */
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	/* host virtual address of the faulting guest page, incl. page offset */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
1071
/*
 * Prepare a vcpu for entering SIE: finish async-pf housekeeping, sync
 * gprs 14/15 into the SIE block, handle rescheduling and pending
 * machine checks, deliver pending interrupts (non-ucontrol VMs only)
 * and process vcpu requests. Returns 0 on success or the request
 * handler's error, in which case the run loop must bail out.
 */
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	/* gg14/gg15 in the SIE block shadow gprs 14 and 15 (16 bytes) */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
1105
/*
 * Post-process a SIE exit.
 *
 * exit_reason >= 0 means a regular intercept; a negative exit_reason is
 * a host-side fault: for ucontrol VMs it is reported to userspace as
 * KVM_EXIT_S390_UCONTROL (-EREMOTE), for gmap pfaults we try the async
 * page fault path and fall back to a synchronous fault-in. If nothing
 * handled the fault (rc still -1), a PGM_ADDRESSING program interrupt
 * is injected into the guest. Finally gprs 14/15 are synced back from
 * the SIE block and, on success, the intercept is dispatched (ucontrol
 * VMs return -EOPNOTSUPP to hand any intercept to userspace).
 */
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
1149
/*
 * Main vcpu run loop: pre-run, enter SIE via sie64a(), post-run;
 * repeat until a signal is pending or pre/post processing reports a
 * non-zero rc. The kvm->srcu read lock is dropped only around the
 * actual guest execution.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
1184
/*
 * KVM_RUN: transfer PSW/prefix/control registers from kvm_run into the
 * SIE block, run the guest, and copy the state back for userspace.
 *
 * rc semantics from __vcpu_run: -EOPNOTSUPP means the intercept must be
 * completed in userspace (a KVM_EXIT_S390_SIEIC exit is prepared here);
 * -EREMOTE means the exit has already been prepared by a handler; a
 * pending signal turns a successful run into KVM_EXIT_INTR / -EINTR.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* only these exit reasons may re-enter KVM_RUN */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync architected state back to the run structure for userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1253
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (fp regs, gp regs, PSW, prefix,
 * fp control, TOD programmable reg, cpu timer, clock comparator,
 * access regs, control regs) to guest absolute storage at @gpa.
 * For the two special addresses, byte 163 is additionally set to 1
 * (archmode ESAME indicator). Returns 0 or -EFAULT.
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &vcpu->arch.sie_block->prefix, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	/* the save area stores bits 0-55 of the clock comparator */
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
1299
/*
 * Store status for a loaded vcpu: sync the live hardware fp/access
 * register state into our copies first, then delegate to the
 * unloaded-state writer.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
1313
/*
 * KVM_ENABLE_CAP (vcpu scope): currently only KVM_CAP_S390_CSS_SUPPORT,
 * which enables in-kernel channel subsystem support VM-wide (idempotent).
 * cap->flags must be zero; unknown caps return -EINVAL.
 */
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
1336
/*
 * Dispatcher for arch-specific vcpu ioctls: interrupt injection, store
 * status, initial PSW/reset, one-reg access, ucontrol address space
 * mapping, synchronous faulting and capability enablement.
 * Returns -ENOTTY for unknown ioctls.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* guest memory access needs the srcu read lock for memslots */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mapping only makes sense for ucontrol VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address to a usable mapping right now */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
1440
/*
 * Fault handler for mmap() on the vcpu fd. For ucontrol VMs the SIE
 * control block page is exposed at KVM_S390_SIE_PAGE_OFFSET; everything
 * else faults with SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
1453
/* No arch-private memslot data on s390 — nothing to free. */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
1458
/* No arch-private memslot data on s390 — nothing to allocate. */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
1464
/* No action needed on s390 when the memslot array is republished. */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
1468
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001469/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001470int kvm_arch_prepare_memory_region(struct kvm *kvm,
1471 struct kvm_memory_slot *memslot,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09001472 struct kvm_userspace_memory_region *mem,
1473 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001474{
Nick Wangdd2887e2013-03-25 17:22:57 +01001475 /* A few sanity checks. We can have memory slots which have to be
1476 located/ended at a segment boundary (1MB). The memory in userland is
1477 ok to be fragmented into various different vmas. It is okay to mmap()
1478 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001479
Carsten Otte598841c2011-07-24 10:48:21 +02001480 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001481 return -EINVAL;
1482
Carsten Otte598841c2011-07-24 10:48:21 +02001483 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001484 return -EINVAL;
1485
Marcelo Tosattif7784b82009-12-23 14:35:18 -02001486 return 0;
1487}
1488
/*
 * Commit a memslot change by (re)mapping the region into the guest
 * address space via gmap. Skipped entirely when host address, guest
 * address and size are unchanged, to avoid needless segment translation
 * exceptions. A gmap_map_segment() failure is only logged — there is no
 * error path back to common code here.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
1513
/* Intentionally empty: s390 has nothing to do when KVM flushes all shadow mappings. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
1517
/* Intentionally empty: s390 has nothing to do when KVM flushes a single memslot. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1522
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001523static int __init kvm_s390_init(void)
1524{
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001525 int ret;
Avi Kivity0ee75be2010-04-28 15:39:01 +03001526 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001527 if (ret)
1528 return ret;
1529
1530 /*
1531 * guests can ask for up to 255+1 double words, we need a full page
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001532 * to hold the maximum amount of facilities. On the other hand, we
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001533 * only set facilities that are known to work in KVM.
1534 */
Michael Mueller78c4b59f2013-07-26 15:04:04 +02001535 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1536 if (!vfacilities) {
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001537 kvm_exit();
1538 return -ENOMEM;
1539 }
Michael Mueller78c4b59f2013-07-26 15:04:04 +02001540 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
Thomas Huthd208c792013-12-12 13:40:40 +01001541 vfacilities[0] &= 0xff82fff3f4fc2000UL;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02001542 vfacilities[1] &= 0x005c000000000000UL;
Christian Borntraegeref50f7a2009-06-23 17:24:07 +02001543 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001544}
1545
/* Module teardown: release the facility page, then unregister from the KVM core. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}
1551
/* Register the module entry and exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");