/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 *
 * Copyright (C) 2007, Intel Corporation.
 * Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

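/*
 * Flush the i-cache lines covering [start, start + len) at a 32-byte
 * stride, then serialize, so freshly copied VMM text is visible to the
 * instruction stream.
 */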
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc(start + l);

	ia64_sync_i();
	ia64_srlz_i();
}

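/*
 * Purge the entire local TLB with a ptc.e loop, using the purge base,
 * counts and strides PAL reports for this CPU.
 */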
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}

long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);

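/*
 * Pin the VMM area with a translation register and ask PAL to set up
 * (or join) the per-CPU virtualization environment; the first CPU to
 * get here establishes kvm_vsa_base for the others.
 */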
void kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		ia64_ptr_entry(0x3, slot);
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = intel_iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
					gpa_t addr, int len, int is_write)
{
	struct kvm_io_device *dev;

	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);

	return dev;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}

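/*
 * Accesses that land on the in-kernel I/O APIC page are completed
 * right here; everything else is forwarded to userspace as a
 * KVM_EXIT_MMIO exit.
 */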
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
	if (mmio_dev) {
		if (!p->dir)
			kvm_iodevice_write(mmio_dev, p->addr, p->size,
						&p->data);
		else
			kvm_iodevice_read(mmio_dev, p->addr, p->size,
						&p->data);
	} else
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}

/*
 * dm: delivery mode of the IPI.
 * vector: vector to be delivered.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
		uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		kvm_apic_set_irq(vcpu, vector, 0);
		break;
	case SAPIC_NMI:
		kvm_apic_set_irq(vcpu, 2, 0);
		break;
	case SAPIC_EXTINT:
		kvm_apic_set_irq(vcpu, 0, 0);
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		break;
	}
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (kvm->vcpus[i]) {
			lid.val = VCPU_LID(kvm->vcpus[i]);
			if (lid.id == id && lid.eid == eid)
				return kvm->vcpus[i];
		}
	}

	return NULL;
}

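/*
 * An IPI to a not-yet-launched vcpu doubles as the SAL boot rendezvous:
 * point the target at the boot IP/GP and make it runnable.  Launched
 * vcpus get the vector delivered according to the IPI delivery mode.
 */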
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}

struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};

static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}

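/*
 * A guest ptc.g must be observed by every vcpu: ask each initialized
 * vcpu (on its current physical CPU) to queue the purge; the queue
 * falls back to a full TLB flush if it overflows.
 */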
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;

	call_data.ptc_g_data = p->u.ptc_g_data;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
						KVM_MP_STATE_UNINITIALIZED ||
					vcpu == kvm->vcpus[i])
			continue;

		if (waitqueue_active(&kvm->vcpus[i]->wq))
			wake_up_interruptible(&kvm->vcpus[i]->wq);

		if (kvm->vcpus[i]->cpu != -1) {
			call_data.vcpu = kvm->vcpus[i];
			smp_call_function_single(kvm->vcpus[i]->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");

	}
	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

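/*
 * Emulate a guest halt: if the next guest timer tick (vpd->itm) has
 * not passed yet, arm the halt hrtimer for that instant and block the
 * vcpu until the timer fires or another event makes it runnable.
 */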
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state =
					KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}

Xiantao Zhangb024b792008-04-01 15:29:29 +0800484static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
485 struct kvm_run *kvm_run) = {
486 [EXIT_REASON_VM_PANIC] = handle_vm_error,
487 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
488 [EXIT_REASON_PAL_CALL] = handle_pal_call,
489 [EXIT_REASON_SAL_CALL] = handle_sal_call,
490 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
491 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
492 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
493 [EXIT_REASON_IPI] = handle_ipi,
494 [EXIT_REASON_PTC_G] = handle_global_purge,
Xiantao Zhang7d637972008-11-21 20:58:11 +0800495 [EXIT_REASON_DEBUG] = handle_vcpu_debug,
Xiantao Zhangb024b792008-04-01 15:29:29 +0800496
497};
498
499static const int kvm_vti_max_exit_handlers =
500 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
501
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}

static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of tr to map vmm */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/* Insert a pair of tr to map data of vm */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
				pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
}

static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	return kvm_insert_vmm_mapping(vcpu);
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}

static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/* Get host and guest context with guest address space. */
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto out;
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
	kvm_vcpu_post_transition(vcpu);
	r = 0;
out:
	return r;
}

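/*
 * Outer run loop: enter the guest with interrupts disabled and keep
 * re-entering, via kvm_handle_exit(), until an exit must be delivered
 * to userspace, a signal is pending, or entry fails.
 */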
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();
	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();
	down_read(&vcpu->kvm->slots_lock);
	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	up_read(&vcpu->kvm->slots_lock);
	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

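/*
 * struct kvm itself lives at a fixed offset inside the single
 * contiguous kvm_vm_data allocation, so the whole VM state can be
 * mapped with one translation register in kvm_insert_vmm_mapping().
 */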
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}

/* Use unused rids to virtualize guest rid. */
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 * Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);
	kvm_init_vm(kvm);

	return kvm;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
				&chip->chip.ioapic,
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

#define RESTORE_REGS(_x)	vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int r;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.guest, regs->saved_guest,
						sizeof(union context)))
		goto out;
	if (copy_from_user(vcpu + 1, regs->saved_stack +
				sizeof(struct kvm_vcpu),
			KVM_STK_OFFSET - sizeof(struct kvm_vcpu)))
		goto out;
	vcpu->arch.exit_data =
		((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	r = 0;
out:
	vcpu_put(vcpu);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	return -EINVAL;
}

static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}

static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/* Zero this area before freeing it, to avoid leaking bits! */
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}

static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/* Set vac and vdc fields */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/* Set virtual buffer */
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}

static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}

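/*
 * The halt timer fired: mark the virtual timer as pending and wake the
 * vcpu if it is still blocked in kvm_emulate_halt().
 */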
static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}

#define PALE_RESET_ENTRY	0x80000000ffffffb0UL

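/*
 * First-run setup: vcpu 0 starts at the PAL reset entry and seeds the
 * itc offset for every vcpu slot; all other vcpus stay UNINITIALIZED
 * until they are woken by a startup IPI (see handle_ipi()).
 */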
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/* Init vcpu context for first run. */
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (vcpu->vcpu_id == 0) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/* Set entry address for first run. */
		regs->cr_iip = PALE_RESET_ENTRY;

		/* Initialize itc offset for vcpus */
		itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT;	/* fpsr */
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0;	/* unat */
	p_ctx->ar[19] = 0x0;	/* rnat */
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
				((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0;	/* pfs */
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/* Initialize region register */
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/* Initialize branch register 0 */
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}

static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);
	local_irq_restore(psr);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	local_irq_restore(psr);
	return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

	r = -EINVAL;
	if (id >= KVM_MAX_VCPUS) {
		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
				KVM_MAX_VCPUS);
		goto fail;
	}

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
					vcpu_data[id].vcpu_struct));
	vcpu->kvm = kvm;

	cpu = get_cpu();
	vti_vcpu_load(vcpu, cpu);
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
		struct kvm_debug_guest *dbg)
{
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	for (i = 0; i < kvm->nmemslots; i++) {
		memslot = &kvm->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}

#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;
	int r;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	r = -EFAULT;
	if (copy_to_user(regs->saved_guest, &vcpu->arch.guest,
				sizeof(union context)))
		goto out;
	if (copy_to_user(regs->saved_stack, (void *)vcpu, KVM_STK_OFFSET))
		goto out;
	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);
	r = 0;
out:
	vcpu_put(vcpu);
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

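/*
 * Populate the P2M table for a newly set slot: RAM pages are pinned
 * via gfn_to_pfn() and mapped cacheable (write-back), while MMIO pfns
 * are entered uncacheable and get no rmap reference.
 */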
int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
				_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

long kvm_arch_dev_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}

static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}

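/*
 * Copy the VMM module's text into the pinned VMM area and patch the
 * entry/trampoline function descriptors (ip, gp) to point at
 * KVM_VMM_BASE instead of the module's original load address.
 */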
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
						struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/* Calculate new position of relocated vmm module. */
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/* Recalculate kvm_vmm_info based on new VMM */
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}

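/*
 * The VMM logs dirty pages in a bitmap kept inside kvm_vm_data; pull
 * that bitmap into the memslot's dirty_bitmap, clearing the source
 * words as we go.  Caller holds dirty_log_lock.
 */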
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long n, base;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG"vcpu_kick_intr %p\n", vcpu);
#endif
}

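/*
 * Wake the vcpu if it is blocked, and if it is currently running guest
 * code on another physical CPU, send a (no-op) IPI so the hardware
 * exit path notices the pending work.
 */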
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	if (vcpu->guest_mode && cpu != ipi_pcpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vec, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
				       unsigned long bitmap)
{
	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
	int i;

	for (i = 1; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i])
			continue;
		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
			lvcpu = kvm->vcpus[i];
	}

	return lvcpu;
}

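/* Scan all 256 IRR bits from the top; return the highest vector set, or -1. */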
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_highest_pending_irq(vcpu) != -1)
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	local_irq_restore(psr);
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001859}