/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

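/*
 * Statistics exported through debugfs: each entry pairs a file name with
 * the offset of the matching counter in struct kvm_vcpu (via VCPU_STAT()
 * above), so the generic KVM code can expose the per-vcpu counters.
 */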
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
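/*
 * Device ioctls reach this handler through the /dev/kvm file descriptor.
 * A minimal userspace sketch (illustrative only, error handling is up to
 * the caller):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int rc = ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * Only KVM_S390_ENABLE_SIE is handled here; everything else is -EINVAL.
 */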
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

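/*
 * Create the architecture specific part of a VM: enable the SIE facility
 * for the current process, allocate the system control area (SCA), set up
 * the floating interrupt state and register a per-VM s390 debug feature
 * named "kvm-<pid>".
 */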
155struct kvm *kvm_arch_create_vm(void)
156{
157 struct kvm *kvm;
158 int rc;
159 char debug_name[16];
160
161 rc = s390_enable_sie();
162 if (rc)
163 goto out_nokvm;
164
165 rc = -ENOMEM;
166 kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
167 if (!kvm)
168 goto out_nokvm;
169
170 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
171 if (!kvm->arch.sca)
172 goto out_nosca;
173
174 sprintf(debug_name, "kvm-%u", current->pid);
175
176 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
177 if (!kvm->arch.dbf)
178 goto out_nodbf;
179
Carsten Otteba5c1e92008-03-25 18:47:26 +0100180 spin_lock_init(&kvm->arch.float_int.lock);
181 INIT_LIST_HEAD(&kvm->arch.float_int.list);
182
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100183 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
184 VM_EVENT(kvm, 3, "%s", "vm created");
185
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100186 return kvm;
187out_nodbf:
188 free_page((unsigned long)(kvm->arch.sca));
189out_nosca:
190 kfree(kvm);
191out_nokvm:
192 return ERR_PTR(rc);
193}
194
Christian Borntraegerd329c032008-11-26 14:50:27 +0100195void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
196{
197 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
198 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraeger6692cef2008-11-26 14:51:08 +0100199 kvm_vcpu_uninit(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +0100200 kfree(vcpu);
201}
202
203static void kvm_free_vcpus(struct kvm *kvm)
204{
205 unsigned int i;
206
207 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
208 if (kvm->vcpus[i]) {
209 kvm_arch_vcpu_destroy(kvm->vcpus[i]);
210 kvm->vcpus[i] = NULL;
211 }
212 }
213}
214
Sheng Yangad8ba2c2009-01-06 10:03:02 +0800215void kvm_arch_sync_events(struct kvm *kvm)
216{
217}
218
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100219void kvm_arch_destroy_vm(struct kvm *kvm)
220{
Christian Borntraegerd329c032008-11-26 14:50:27 +0100221 kvm_free_vcpus(kvm);
Carsten Ottedfdded72008-06-27 15:05:34 +0200222 kvm_free_physmem(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100223 free_page((unsigned long)(kvm->arch.sca));
Christian Borntraegerd329c032008-11-26 14:50:27 +0100224 debug_unregister(kvm->arch.dbf);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100225 kfree(kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100226}
227
228/* Section: vcpu related */
229int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
230{
231 return 0;
232}
233
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

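/*
 * Scheduling a vcpu in on a host cpu: save the host floating point and
 * access registers and load the guest copies; kvm_arch_vcpu_put() below
 * restores the host state on schedule-out.
 */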
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

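/*
 * Illustrative note: gmsor/gmslm in the SIE block bound the guest memory
 * window.  With the example values guest_origin == 0 and guest_memsize ==
 * 256 MB, the setup below yields gmsor == 0 and gmslm == 0x100fffff, i.e.
 * guest memory plus the 1 MB of virtio descriptor space defined above.
 */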
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		    (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

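/*
 * Allocate a vcpu together with its SIE control block, enter the block
 * into the system control area (SCA) and hook the vcpu's local interrupt
 * state into the VM wide floating interrupt structure.
 */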
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

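/*
 * Run the guest once on this vcpu: handle a pending host machine check,
 * deliver pending guest interrupts and enter SIE via sie64a().  A fault on
 * the SIE instruction itself is reflected to the guest as an addressing
 * exception.
 */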
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

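/*
 * Main loop behind KVM_RUN: re-enter SIE until a signal arrives or an
 * intercept needs help.  An intercept the kernel cannot handle (-ENOTSUPP)
 * is exported to userspace as a KVM_EXIT_S390_SIEIC exit; -EREMOTE means
 * the intercept handler already prepared kvm_run for userspace.
 */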
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

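/*
 * Copy data into guest memory: with prefix != 0 the prefix-relative
 * copy_to_guest() is used, otherwise copy_to_guest_absolute().
 */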
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

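/*
 * Per-vcpu ioctls: interrupt injection (KVM_S390_INTERRUPT), store status
 * (KVM_S390_STORE_STATUS), setting the initial PSW (KVM_S390_SET_INITIAL_PSW)
 * and the initial cpu reset (KVM_S390_INITIAL_RESET).  A minimal userspace
 * sketch (illustrative only, assuming vcpu_fd was obtained via
 * KVM_CREATE_VCPU):
 *
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 */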
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which has
	   to start at guest virtual zero, has to be located at a page
	   boundary in userland and has to end at a page boundary. The memory
	   in userland may be fragmented into various different vmas. It is
	   okay to mmap() and munmap() memory in this slot at any time after
	   doing this call. */

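	/*
	 * Illustrative example (hypothetical values): a 128 MB guest would
	 * register slot = 0, guest_phys_addr = 0, memory_size = 128 << 20
	 * and a page aligned userspace_addr, which passes every check below.
	 */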
	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);