/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

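/*
 * Per-vcpu event counters, exported through debugfs by the common KVM
 * code. VCPU_STAT() expands to the offset of the counter inside
 * struct kvm_vcpu plus the KVM_STAT_VCPU type tag.
 */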
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

void decache_vcpus_on_cpu(int cpu)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
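/*
 * The only device ioctl is KVM_S390_ENABLE_SIE: s390_enable_sie()
 * prepares the calling process' address space for running guests
 * under the SIE (start interpretive execution) instruction.
 */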
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

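/*
 * VM creation: enable SIE for the current mm, allocate the kvm struct
 * and the SCA (system control area, one zeroed page that SIE shares
 * among all vcpus of this VM), and register an s390 debug feature
 * ("kvm-<pid>") that backs the VM_EVENT/VCPU_EVENT trace macros.
 */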
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}

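/*
 * Lazy register switch: on vcpu_load the host floating point and
 * access registers are saved and the guest's are installed; vcpu_put
 * reverses this. If a signal is already pending, flag a stop
 * interrupt in the cpuflags so the SIE instruction exits early.
 */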
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);

	if (signal_pending(current))
		atomic_set_mask(CPUSTAT_STOP_INT,
				&vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

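/*
 * One-time setup of the SIE control block: run the guest in
 * z/Architecture mode, set the guest memory origin and limit
 * registers (gmsor/gmslm) to a fixed window starting at guest
 * absolute 0, and arm a timer that wakes an idle vcpu when its clock
 * comparator fires. The CPU id is taken from the host, with the
 * version code replaced by 0xfe, presumably so the guest's STIDP can
 * tell it is running virtualized.
 */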
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
	vcpu->arch.sie_block->gmsor = 0x000000000000;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		    (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

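/*
 * Creating a vcpu allocates its SIE control block and links it into
 * the VM: the block's address goes into this vcpu's SCA entry (sda),
 * while the SCA address is split into high and low halves (scaoh,
 * scaol) inside the SIE block. The vcpu's local interrupt structure
 * is also wired up to the VM-wide floating interrupt list here.
 */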
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	try_module_get(THIS_MODULE);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

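/*
 * One round trip through SIE. Guest registers 14 and 15 live in the
 * SIE control block (gg14/gg15) while the guest runs, so the 16-byte
 * memcpy shuttles them in before sie64a() and back out afterwards.
 * kvm_guest_enter()/kvm_guest_exit() bracket the run for guest
 * cpu-time accounting; the code calls them with interrupts disabled.
 */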
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

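/*
 * The main run loop: re-apply any PSW update userspace made after the
 * last SIEIC exit, then alternate between delivering pending
 * interrupts, executing the guest, and handling the resulting
 * intercept until either a handler asks for userspace (-ENOTSUPP or
 * -EREMOTE) or a signal is pending.
 */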
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		kvm_s390_deliver_pending_interrupts(vcpu);
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return 0;
}

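/*
 * Copy data into guest memory. If 'prefix' is set, the destination is
 * a guest logical address translated through the vcpu's prefix
 * register (copy_to_guest); otherwise it is treated as a guest
 * absolute address (copy_to_guest_absolute).
 */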
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

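/*
 * Backend for the vcpu file descriptor ioctls: interrupt injection,
 * store status, setting the initial PSW, and initial cpu reset.
 * A minimal userspace sequence might look like this (hypothetical
 * values, no error handling):
 *
 *	struct kvm_s390_interrupt s390int = { .type = KVM_S390_INT_VIRTIO };
 *	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int);
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 */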
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest virtual zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);