/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
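/*
 * The only device ioctl on s390 is KVM_S390_ENABLE_SIE, which prepares the
 * calling process for running guests under the SIE instruction; every other
 * device ioctl is rejected with -EINVAL.
 */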
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

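/*
 * No optional KVM capabilities are advertised yet: every KVM_CHECK_EXTENSION
 * query falls through to the default case and reports 0.
 */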
int kvm_dev_ioctl_check_extension(long ext)
{
        switch (ext) {
        default:
                return 0;
        }
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

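/*
 * The only VM ioctl handled here is KVM_S390_INTERRUPT, which injects an
 * interrupt into the virtual machine as a whole (as opposed to the per-vcpu
 * variant handled in kvm_arch_vcpu_ioctl() below).
 */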
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}

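/*
 * VM creation: enable SIE for the current process, allocate the kvm
 * structure and the system control area (SCA) that holds the per-cpu SIE
 * block pointers, register an s390 debug feature ("kvm-<pid>") for tracing,
 * and initialize the floating interrupt list shared by all vcpus.
 */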
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

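/*
 * Tearing down a vcpu: detach its SIE control block from the SCA entry (if
 * the entry still points at this vcpu) before freeing the block itself.
 */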
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_destroy(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

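/*
 * Context switch of floating point and access registers: on load, save the
 * host registers and install the guest copies; on put, do the reverse.
 */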
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

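/*
 * Final vcpu setup: mark the guest as running in z/Architecture mode, set
 * the guest memory origin and limit (including the virtio descriptor space
 * above guest memory) in the SIE control block, set the execution control
 * bits (ecb/eca), and wire up the hrtimer plus tasklet used to wake an idle
 * vcpu when its clock comparator fires.
 */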
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
                                      vcpu->kvm->arch.guest_origin +
                                      VIRTIODESCSPACE - 1ul;
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb = 2;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

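/*
 * Allocate a vcpu together with its SIE control block, publish the block in
 * the SCA slot for this cpu id, and hook the vcpu's local interrupt state
 * into the VM-wide floating interrupt structure.
 */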
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        else
                BUG_ON(!kvm->vcpus[id]); /* vcpu already exists */
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

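/*
 * One round trip through SIE: handle pending machine checks and deliver
 * pending interrupts, enter the guest, and on return reflect a fault on the
 * SIE instruction itself into the guest as an addressing exception.
 * General registers 14 and 15 are kept in sync with the gg14/gg15 copies in
 * the SIE control block around the guest run.
 */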
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

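/*
 * KVM_RUN: re-load the guest PSW from kvm_run after a previous SIEIC exit to
 * userspace, then loop running the guest and handling intercepts in the
 * kernel until a signal arrives or an intercept needs userspace.  -ENOTSUPP
 * means the intercept must be exported via kvm_run here; -EREMOTE means the
 * intercept handler already prepared kvm_run itself.
 */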
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}

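/*
 * Copy data into guest memory either through the vcpu's prefix-relative
 * address space or as an absolute guest address, depending on which form
 * the store-status address was given in.
 */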
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}


static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int i;

        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot || kvm->arch.guest_memsize)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        /* lock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (!kvm->vcpus[i])
                        continue;
                if (!mutex_trylock(&kvm->vcpus[i]->mutex))
                        goto fail_out;
        }

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* update sie control blocks, and unlock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm->vcpus[i]->arch.sie_block->gmsor =
                                kvm->arch.guest_origin;
                        kvm->vcpus[i]->arch.sie_block->gmslm =
                                kvm->arch.guest_memsize +
                                kvm->arch.guest_origin +
                                VIRTIODESCSPACE - 1ul;
                        mutex_unlock(&kvm->vcpus[i]->mutex);
                }
        }

        return 0;

fail_out:
        /* unlock only those vcpus whose mutex was actually taken above */
        for (i--; i >= 0; i--)
                if (kvm->vcpus[i])
                        mutex_unlock(&kvm->vcpus[i]->mutex);
        return -EINVAL;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);