/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 *
 * Copyright (C) 2007, Intel Corporation.
 *	Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

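/*
 * Flush the local icache over [start, start + len): fc each 32-byte
 * line, then serialize so later instruction fetches see the flushed
 * range (needed after copying the relocated VMM image into place).
 */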
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc(start + l);

	ia64_sync_i();
	ia64_srlz_i();
}

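/*
 * Purge the entire local TLB with ptc.e, walking the per-CPU loop
 * counts and strides that PAL reported (cached in local_cpu_data).
 */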
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}

long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);

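/*
 * Pin the VMM area with a temporary translation register so PAL can set
 * up this CPU's VP environment; the first CPU to succeed records the
 * VSA base that all vcpus will use.
 */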
void kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		/* Don't leak vp_lock on the error path. */
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = intel_iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
					gpa_t addr, int len, int is_write)
{
	struct kvm_io_device *dev;

	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);

	return dev;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}

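/*
 * MMIO exits are completed one of two ways: accesses that hit the
 * in-kernel IOAPIC page are dispatched to the matching io device here,
 * everything else is punted to userspace through kvm_run->mmio.
 */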
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
	if (mmio_dev) {
		if (!p->dir)
			kvm_iodevice_write(mmio_dev, p->addr, p->size,
						&p->data);
		else
			kvm_iodevice_read(mmio_dev, p->addr, p->size,
						&p->data);
	} else
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}

/*
 * dm: delivery mode (SAPIC_FIXED, SAPIC_NMI, SAPIC_EXTINT, ...).
 * vector: interrupt vector to deliver.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
		uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		kvm_apic_set_irq(vcpu, vector, 0);
		break;
	case SAPIC_NMI:
		kvm_apic_set_irq(vcpu, 2, 0);
		break;
	case SAPIC_EXTINT:
		kvm_apic_set_irq(vcpu, 0, 0);
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		break;
	}
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (kvm->vcpus[i]) {
			lid.val = VCPU_LID(kvm->vcpus[i]);
			if (lid.id == id && lid.eid == eid)
				return kvm->vcpus[i];
		}
	}

	return NULL;
}

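/*
 * An IPI aimed at a vcpu that has never run acts as a startup request:
 * point it at the SAL boot vector and mark it runnable.  Otherwise the
 * interrupt is delivered according to the IPI's delivery mode.
 */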
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}

struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};

static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}

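/*
 * Fan a guest ptc.g out to every other initialized vcpu: each target
 * queues the purge in vcpu_global_purge() (run via IPI), falling back
 * to a full TLB flush when its purge queue overflows.
 */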
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;

	call_data.ptc_g_data = p->u.ptc_g_data;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
						KVM_MP_STATE_UNINITIALIZED ||
					vcpu == kvm->vcpus[i])
			continue;

		if (waitqueue_active(&kvm->vcpus[i]->wq))
			wake_up_interruptible(&kvm->vcpus[i]->wq);

		if (kvm->vcpus[i]->cpu != -1) {
			call_data.vcpu = kvm->vcpus[i];
			smp_call_function_single(kvm->vcpus[i]->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
	}
	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

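/*
 * Emulate guest hlt: arm an hrtimer from the delta between the guest's
 * next timer match (vpd->itm) and the current ITC, block until it fires
 * or the vcpu is woken, and return -EINTR if a signal ended the wait.
 */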
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) +
						vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state =
					KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}

static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]			= handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]		= handle_mmio,
	[EXIT_REASON_PAL_CALL]			= handle_pal_call,
	[EXIT_REASON_SAL_CALL]			= handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]		= handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]		= handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]	= handle_external_interrupt,
	[EXIT_REASON_IPI]			= handle_ipi,
	[EXIT_REASON_PTC_G]			= handle_global_purge,
	[EXIT_REASON_DEBUG]			= handle_vcpu_debug,
};

static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}

static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of tr to map the vmm */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/* Insert a pair of tr to map the vm's data */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
				pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
}

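/*
 * Around every guest entry: flush the TLB if this physical CPU last ran
 * a different vcpu, switch rr6 to the VMM's region id and pin the VMM
 * and VM-data areas; both are undone right after the exit.
 */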
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	return kvm_insert_vmm_mapping(vcpu);
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}

static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/* Get host and guest context with guest address space. */
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto out;
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
	kvm_vcpu_post_transition(vcpu);
	r = 0;
out:
	return r;
}

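/*
 * Main run loop: enter the guest with interrupts and preemption off,
 * then handle the exit, staying in-kernel for as long as exits can be
 * handled here and no signal or reschedule is pending.
 */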
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();
	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();
	down_read(&vcpu->kvm->slots_lock);
	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	up_read(&vcpu->kvm->slots_lock);
	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

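/*
 * A VM's entire state (struct kvm, per-vcpu data, dirty log) lives in
 * one physically contiguous KVM_VM_DATA_SIZE block, so the VMM can map
 * it with a single translation register.
 */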
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}

/* Use unused rids to virtualize guest rid. */
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 * Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);
	kvm_init_vm(kvm);

	return kvm;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
				&chip->chip.ioapic,
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

#define RESTORE_REGS(_x)	vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
		}
	case KVM_CREATE_IRQCHIP:
		r = -EFAULT;
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
		}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
		}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
		}
	default:
		;
	}
out:
	return r;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	return -EINVAL;
}

static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}

static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/* Zero this area before freeing to avoid leaking stale bits! */
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}

static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/* Set vac and vdc fields */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/* Set virtual buffer */
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}

static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}

#define PALE_RESET_ENTRY	0x80000000ffffffb0UL

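/*
 * First-time vcpu setup: vcpu 0 boots at the PAL reset vector and seeds
 * the shared ITC offset, while later vcpus wait uninitialized for a
 * startup IPI.  The guest context is primed to enter the VMM.
 */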
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/* Init vcpu context for first run. */
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (vcpu->vcpu_id == 0) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/* Set entry address for first run. */
		regs->cr_iip = PALE_RESET_ENTRY;

		/* Initialize itc offset for vcpus */
		itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT;	/* fpsr */
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0;	/* unat */
	p_ctx->ar[19] = 0x0;	/* rnat */
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
				((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0;	/* pfs */
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/* Initialize region registers */
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/* Initialize branch register 0 */
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}

static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);
	local_irq_restore(psr);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	local_irq_restore(psr);
	return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

	r = -EINVAL;
	if (id >= KVM_MAX_VCPUS) {
		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
				KVM_MAX_VCPUS);
		goto fail;
	}

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
					vcpu_data[id].vcpu_struct));
	vcpu->kvm = kvm;

	cpu = get_cpu();
	vti_vcpu_load(vcpu, cpu);
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
		struct kvm_debug_guest *dbg)
{
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	for (i = 0; i < kvm->nmemslots; i++) {
		memslot = &kvm->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}

#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

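/*
 * Populate the P2M entries for a new slot up front: RAM pages get a
 * cacheable writeback mapping with a page reference kept in rmap, MMIO
 * pages get an uncacheable GPFN_PHYS_MMIO entry.
 */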
int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
				_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

long kvm_arch_dev_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}

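/*
 * Probe PAL for VT-i support: the VM feature bit must be available and
 * PAL must report the VP environment buffer size (checked again when
 * the VMM area is allocated).
 */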
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}

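/*
 * Copy the VMM module image into the pinned kvm_vmm_base area and patch
 * its function descriptors (ip/gp) for the new location, since the VMM
 * runs at KVM_VMM_BASE rather than the module's load address.
 */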
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
						struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/* Calculate new position of relocated vmm module. */
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/* Recalculate kvm_vmm_info based on new VMM */
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE+func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}

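/*
 * The VMM tracks dirty pages in a bitmap inside the shared kvm_vm_data
 * area; copy the slot's part into the memslot dirty bitmap and clear
 * the shared copy.
 */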
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long n, base;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
#endif
}

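/*
 * Wake the vcpu if it is blocked and, if it is currently executing
 * guest code on another CPU, nudge it out of guest mode with an
 * empty-handler IPI so pending requests are noticed promptly.
 */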
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	if (vcpu->guest_mode && cpu != ipi_pcpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vec, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
				       unsigned long bitmap)
{
	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
	int i;

	for (i = 1; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i])
			continue;
		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
			lvcpu = kvm->vcpus[i];
	}

	return lvcpu;
}

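/* Find the highest set bit in a 256-bit irr field; -1 if none is set. */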
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_highest_pending_irq(vcpu) != -1)
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	local_irq_restore(psr);
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}