/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

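/*
 * Create a new virtual machine: enable SIE for this process, allocate the
 * kvm structure, the system control area (SCA) and the s390 debug feature,
 * and initialize the floating interrupt list.
 */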
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

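/*
 * Swap host and guest register state around guest execution: vcpu_load
 * saves the host floating point and access registers and loads the guest
 * copies, vcpu_put does the reverse.
 */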
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

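/*
 * Set up the SIE control block of a new vcpu: z/Architecture mode,
 * interception controls, the facility list, and the clock comparator
 * timer/tasklet used to wake up a waiting guest cpu.
 */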
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

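/*
 * Run the vcpu once: handle a pending reschedule or machine check, deliver
 * pending interrupts, then enter SIE via sie64a(). A fault on the SIE
 * instruction is reflected to the guest as an addressing exception.
 */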
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

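/*
 * The KVM_RUN loop: process pending vcpu requests, restore the guest PSW
 * from kvm_run, then call __vcpu_run() and the intercept handler until a
 * signal arrives or an intercept needs userspace, and finally fill kvm_run
 * with the intercept data for userspace.
 */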
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

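/*
 * Copy data into the guest: via the prefix-relative (virtual) address space
 * when @prefix is set, otherwise via absolute addressing.
 */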
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

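/*
 * Module init: register with the KVM core and build the facility list that
 * is presented to guests (restricted to facilities known to work in KVM).
 */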
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);