blob: 5e3473c9a6390b508460482154d5fe437d961c5f [file] [log] [blame]
Heiko Carstensb0c632d2008-03-25 18:47:20 +01001/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 */
14
15#include <linux/compiler.h>
16#include <linux/err.h>
17#include <linux/fs.h>
18#include <linux/init.h>
19#include <linux/kvm.h>
20#include <linux/kvm_host.h>
21#include <linux/module.h>
22#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010023#include <linux/timer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010024#include <asm/lowcore.h>
25#include <asm/pgtable.h>
26
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010027#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include "gaccess.h"
29
/* Expand to the (offset, type) pair kvm's debugfs code expects for a
 * per-vcpu statistics counter stored in struct kvm_vcpu.stat. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Statistics counters exported via debugfs; terminated by a NULL name. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ NULL }
};
53
54
55/* Section: not file related */
/* No hardware enablement step is needed on s390. */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}
60
/* Nothing to undo: see kvm_arch_hardware_enable(). */
void kvm_arch_hardware_disable(void *garbage)
{
}
64
/* Required by common kvm code; no per-cpu vcpu state is cached on s390. */
void decache_vcpus_on_cpu(int cpu)
{
}
68
/* No global hardware setup needed on s390; always succeeds. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
73
/* Nothing to tear down: see kvm_arch_hardware_setup(). */
void kvm_arch_hardware_unsetup(void)
{
}
77
/* All s390 processors that run this kernel are compatible; no check done. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
81
/* Arch hook called from kvm_init(); no s390-specific init required. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
86
/* Arch hook called from kvm_exit(); nothing to clean up. */
void kvm_arch_exit(void)
{
}
90
91/* Section: device related */
92long kvm_arch_dev_ioctl(struct file *filp,
93 unsigned int ioctl, unsigned long arg)
94{
95 if (ioctl == KVM_S390_ENABLE_SIE)
96 return s390_enable_sie();
97 return -EINVAL;
98}
99
/* KVM_CHECK_EXTENSION: no optional capabilities are advertised yet. */
int kvm_dev_ioctl_check_extension(long ext)
{
	return 0;
}
104
105/* Section: vm related */
106/*
107 * Get (and clear) the dirty memory log for a memory slot.
108 */
/* Dirty page logging is not implemented on s390; report success with an
 * empty log rather than failing userspace. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
114
115long kvm_arch_vm_ioctl(struct file *filp,
116 unsigned int ioctl, unsigned long arg)
117{
118 struct kvm *kvm = filp->private_data;
119 void __user *argp = (void __user *)arg;
120 int r;
121
122 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100123 case KVM_S390_INTERRUPT: {
124 struct kvm_s390_interrupt s390int;
125
126 r = -EFAULT;
127 if (copy_from_user(&s390int, argp, sizeof(s390int)))
128 break;
129 r = kvm_s390_inject_vm(kvm, &s390int);
130 break;
131 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100132 default:
133 r = -EINVAL;
134 }
135
136 return r;
137}
138
/*
 * Allocate and initialize the arch-specific VM structure.
 *
 * Enables SIE for the current process, allocates struct kvm and the
 * system control area (SCA) page, registers a per-VM s390 debug feature
 * area named "kvm-<pid>", and initializes the floating interrupt list.
 * Resources are unwound in reverse order via the goto labels on failure.
 *
 * Returns the new kvm on success, ERR_PTR(-errno) on failure.
 */
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	/* SCA must be a zeroed page: SIE hardware reads it directly. */
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	/* "kvm-" + pid fits in 16 bytes for any valid pid value */
	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}
180
/* Tear down the VM: release the debug area, the SCA page, the kvm
 * structure, and the module reference taken in kvm_arch_create_vm(). */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}
188
189/* Section: vcpu related */
/* Common-code hook; all real vcpu setup happens in kvm_arch_vcpu_create()
 * and kvm_arch_vcpu_setup(). */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
194
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}
200
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host's
 * floating point and access registers and install the guest's. The
 * save/restore order must not change — the guest fpc is masked to valid
 * bits before being loaded.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);

	/* make SIE drop back to the host if a signal arrived meanwhile */
	if (signal_pending(current))
		atomic_set_mask(CPUSTAT_STOP_INT,
			&vcpu->arch.sie_block->cpuflags);
}
213
/* Inverse of kvm_arch_vcpu_load(): preserve the guest's fp/access
 * registers and restore the host's. */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
221
/*
 * Perform the architected initial cpu reset on the SIE control block.
 * PSW, prefix, cpu timer, clock comparator and TOD programmable register
 * are zeroed; control registers get their architected reset values
 * (cr0 = 0xE0, cr14 = 0xC2000000). The guest fpc is cleared in the
 * shadow state and in the real fpc register via lfpc.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	/* ihcpu = 0xffff marks the intercept-handling cpu as invalid */
	vcpu->arch.sie_block->ihcpu  = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	/* gbea = 1: reset value of the breaking-event address register */
	vcpu->arch.sie_block->gbea = 1;
}
239
/*
 * One-time SIE control block setup for a new vcpu: z/Architecture mode,
 * guest memory limit/origin, execution controls, and the timer used to
 * wake the vcpu from wait state when its clock comparator fires.
 * The gmslm/gmsor, ecb and eca magic values configure SIE behavior —
 * see the SIE block layout for their meaning.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
	vcpu->arch.sie_block->gmsor = 0x000000000000;
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		 (unsigned long) vcpu);
	return 0;
}
251
252struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
253 unsigned int id)
254{
255 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
256 int rc = -ENOMEM;
257
258 if (!vcpu)
259 goto out_nomem;
260
261 vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
262
263 if (!vcpu->arch.sie_block)
264 goto out_free_cpu;
265
266 vcpu->arch.sie_block->icpua = id;
267 BUG_ON(!kvm->arch.sca);
268 BUG_ON(kvm->arch.sca->cpu[id].sda);
269 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
270 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
271 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
272
Carsten Otteba5c1e92008-03-25 18:47:26 +0100273 spin_lock_init(&vcpu->arch.local_int.lock);
274 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
275 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
276 spin_lock_bh(&kvm->arch.float_int.lock);
277 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
278 init_waitqueue_head(&vcpu->arch.local_int.wq);
279 spin_unlock_bh(&kvm->arch.float_int.lock);
280
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100281 rc = kvm_vcpu_init(vcpu, kvm, id);
282 if (rc)
283 goto out_free_cpu;
284 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
285 vcpu->arch.sie_block);
286
287 try_module_get(THIS_MODULE);
288
289 return vcpu;
290out_free_cpu:
291 kfree(vcpu);
292out_nomem:
293 return ERR_PTR(rc);
294}
295
/* Free a vcpu: release its sie block page, the vcpu structure, and the
 * module reference taken at creation.
 * NOTE(review): the SCA entry (sca->cpu[id].sda) is not cleared here —
 * looks intentional for VM teardown, but verify no path destroys a vcpu
 * while the VM lives on. */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}
303
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
310
/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial cpu
 * reset while holding the vcpu. Always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}
318
/* KVM_SET_REGS: copy all general purpose registers from userspace's
 * kvm_regs into the vcpu shadow state. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
326
/* KVM_GET_REGS: copy all general purpose registers out of the vcpu
 * shadow state into userspace's kvm_regs. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
334
/* KVM_SET_SREGS: install access registers (shadow state) and control
 * registers (directly in the sie block) from userspace. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
344
/* KVM_GET_SREGS: read access registers (shadow state) and control
 * registers (sie block) into userspace's kvm_sregs. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
354
/* KVM_SET_FPU: install floating point registers and fp control word
 * into the vcpu shadow state. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}
363
/* KVM_GET_FPU: read floating point registers and fp control word from
 * the vcpu shadow state into userspace's kvm_fpu. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}
372
373static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
374{
375 int rc = 0;
376
377 vcpu_load(vcpu);
378 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
379 rc = -EBUSY;
380 else
381 vcpu->arch.sie_block->gpsw = psw;
382 vcpu_put(vcpu);
383 return rc;
384}
385
/* KVM_TRANSLATE: address translation for userspace; unsupported. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
391
/* KVM_DEBUG_GUEST: guest debugging support; unsupported. */
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}
397
/*
 * Enter SIE once and run the guest until the hardware intercepts.
 *
 * gprs 14 and 15 live in the sie block (gg14/gg15, 16 bytes total) while
 * the guest runs; they are copied in before entry and back out after.
 * kvm_guest_enter()/kvm_guest_exit() must be called with interrupts
 * disabled — do not reorder the local_irq_* brackets around them.
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	/* sie64a runs the guest; returns when SIE intercepts to the host */
	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
420
/*
 * KVM_RUN ioctl: the main vcpu execution loop.
 *
 * Installs the vcpu's signal mask, marks the cpu running, re-applies any
 * PSW changes userspace made while handling a previous SIEIC exit, then
 * loops: deliver pending interrupts, enter SIE, handle the intercept
 * in-kernel. The loop ends on a pending signal or a non-zero rc from
 * the intercept handler.
 *
 * rc translation before returning to userspace:
 *   -ENOTSUPP  intercept needs userspace — fill kvm_run->s390_sieic
 *   -EREMOTE   intercept handled, kvm_run already prepared by handler
 * Both are mapped to 0; the function itself always returns 0.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		/* userspace may have edited the psw during the last exit */
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		kvm_s390_deliver_pending_interrupts(vcpu);
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return 0;
}
483
484static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
485 unsigned long n, int prefix)
486{
487 if (prefix)
488 return copy_to_guest(vcpu, guestdest, from, n);
489 else
490 return copy_to_guest_absolute(vcpu, guestdest, from, n);
491}
492
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* archmode byte stored at guest address 163 marks z/Arch format */
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/* 16 fp registers, 8 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	/* 16 general purpose registers, 8 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	/* 16 access registers, 4 bytes each */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	/* 16 control registers, 8 bytes each */
	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
560
/* Locked wrapper around __kvm_s390_vcpu_store_status() for the
 * KVM_S390_STORE_STATUS ioctl path. */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}
570
571long kvm_arch_vcpu_ioctl(struct file *filp,
572 unsigned int ioctl, unsigned long arg)
573{
574 struct kvm_vcpu *vcpu = filp->private_data;
575 void __user *argp = (void __user *)arg;
576
577 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +0100578 case KVM_S390_INTERRUPT: {
579 struct kvm_s390_interrupt s390int;
580
581 if (copy_from_user(&s390int, argp, sizeof(s390int)))
582 return -EFAULT;
583 return kvm_s390_inject_vcpu(vcpu, &s390int);
584 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100585 case KVM_S390_STORE_STATUS:
586 return kvm_s390_vcpu_store_status(vcpu, arg);
587 case KVM_S390_SET_INITIAL_PSW: {
588 psw_t psw;
589
590 if (copy_from_user(&psw, argp, sizeof(psw)))
591 return -EFAULT;
592 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
593 }
594 case KVM_S390_INITIAL_RESET:
595 return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
596 default:
597 ;
598 }
599 return -EINVAL;
600}
601
602/* Section: memory related */
603int kvm_arch_set_memory_region(struct kvm *kvm,
604 struct kvm_userspace_memory_region *mem,
605 struct kvm_memory_slot old,
606 int user_alloc)
607{
608 /* A few sanity checks. We can have exactly one memory slot which has
609 to start at guest virtual zero and which has to be located at a
610 page boundary in userland and which has to end at a page boundary.
611 The memory in userland is ok to be fragmented into various different
612 vmas. It is okay to mmap() and munmap() stuff in this slot after
613 doing this call at any time */
614
615 if (mem->slot)
616 return -EINVAL;
617
618 if (mem->guest_phys_addr)
619 return -EINVAL;
620
621 if (mem->userspace_addr & (PAGE_SIZE - 1))
622 return -EINVAL;
623
624 if (mem->memory_size & (PAGE_SIZE - 1))
625 return -EINVAL;
626
627 kvm->arch.guest_origin = mem->userspace_addr;
628 kvm->arch.guest_memsize = mem->memory_size;
629
630 /* FIXME: we do want to interrupt running CPUs and update their memory
631 configuration now to avoid race conditions. But hey, changing the
632 memory layout while virtual CPUs are running is usually bad
633 programming practice. */
634
635 return 0;
636}
637
/* No guest frame aliasing on s390: the identity mapping is used. */
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
642
/* Module entry point: hand control to common kvm initialization. */
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}
647
/* Module exit point: common kvm teardown. */
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}
652
/* Register module entry/exit handlers. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);