/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 *
 *	Copyright (C) 2007, Intel Corporation.
 *	Xiantao Zhang  (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

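/*
 * Read the current time source for the guest's virtual ITC.  On SGI SN2
 * the processor ITCs are not reliably synchronized across the system, so
 * the chipset RTC is used instead; everywhere else ar.itc is read directly.
 */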
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (vcpu->kvm->arch.is_sn2)
		return rtc_time();
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}

static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc((void *)(start + l));

	ia64_sync_i();
	ia64_srlz_i();
}

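/*
 * Purge the entire local TLB using the purge parameters (base, counts and
 * strides) that PAL reports for this processor, with interrupts disabled
 * for the duration of the loop.
 */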
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr = local_cpu_data->ptce_base;
	count0 = local_cpu_data->ptce_count[0];
	count1 = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);

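/*
 * Per-CPU VT-i enable: pin the VMM area with a translation register and
 * ask PAL to initialize the virtualization environment.  The first CPU to
 * get here performs the one-time VP_INIT_ENV_INITALIZE call and records
 * the returned VSA base shared by all vcpus.
 */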
void kvm_arch_hardware_enable(void *garbage)
{
	long  status;
	long  tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		/* Don't leak vp_lock on the error path. */
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}
158
159void kvm_arch_hardware_disable(void *garbage)
160{
161
162 long status;
163 int slot;
164 unsigned long pte;
165 unsigned long saved_psr;
166 unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
167
168 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
169 PAGE_KERNEL));
170
171 local_irq_save(saved_psr);
172 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
Julia Lawallcab7a1e2008-07-22 21:38:18 +0200173 local_irq_restore(saved_psr);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800174 if (slot < 0)
175 return;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800176
177 status = ia64_pal_vp_exit_env(host_iva);
178 if (status)
179 printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
180 status);
181 ia64_ptr_entry(0x3, slot);
182}
183
184void kvm_arch_check_processor_compat(void *rtn)
185{
186 *(int *)rtn = 0;
187}
188
189int kvm_dev_ioctl_check_extension(long ext)
190{
191
192 int r;
193
194 switch (ext) {
195 case KVM_CAP_IRQCHIP:
Xiantao Zhang8c4b5372008-08-28 09:34:08 +0800196 case KVM_CAP_MP_STATE:
Gleb Natapov49256632009-02-04 17:28:14 +0200197 case KVM_CAP_IRQ_INJECT_STATUS:
Xiantao Zhangb024b792008-04-01 15:29:29 +0800198 r = 1;
199 break;
Laurent Vivier7f39f8a2008-05-30 16:05:57 +0200200 case KVM_CAP_COALESCED_MMIO:
201 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
202 break;
Xiantao Zhang2381ad22008-10-08 08:29:33 +0800203 case KVM_CAP_IOMMU:
Joerg Roedel19de40a2008-12-03 14:43:34 +0100204 r = iommu_found();
Xiantao Zhang2381ad22008-10-08 08:29:33 +0800205 break;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800206 default:
207 r = 0;
208 }
209 return r;
210
211}
212
213static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
Laurent Vivier92760492008-05-30 16:05:53 +0200214 gpa_t addr, int len, int is_write)
Xiantao Zhangb024b792008-04-01 15:29:29 +0800215{
216 struct kvm_io_device *dev;
217
Laurent Vivier92760492008-05-30 16:05:53 +0200218 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800219
220 return dev;
221}
222
223static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
224{
225 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
226 kvm_run->hw.hardware_exit_reason = 1;
227 return 0;
228}
229
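/*
 * MMIO exits: accesses that hit the in-kernel IOAPIC page are completed
 * immediately against the registered mmio bus device; everything else is
 * copied into kvm_run and handed to userspace as KVM_EXIT_MMIO.
 */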
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
	if (mmio_dev) {
		if (!p->dir)
			kvm_iodevice_write(mmio_dev, p->addr, p->size,
						&p->data);
		else
			kvm_iodevice_read(mmio_dev, p->addr, p->size,
						&p->data);

	} else
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}
265
266static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
267{
268 struct exit_ctl_data *p;
269
270 p = kvm_get_exit_data(vcpu);
271
272 if (p->exit_reason == EXIT_REASON_PAL_CALL)
273 return kvm_pal_emul(vcpu, kvm_run);
274 else {
275 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
276 kvm_run->hw.hardware_exit_reason = 2;
277 return 0;
278 }
279}
280
281static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
282{
283 struct exit_ctl_data *p;
284
285 p = kvm_get_exit_data(vcpu);
286
287 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
288 kvm_sal_emul(vcpu);
289 return 1;
290 } else {
291 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
292 kvm_run->hw.hardware_exit_reason = 3;
293 return 0;
294 }
295
296}
297
Gleb Natapov58c2dde2009-03-05 16:35:04 +0200298static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
299{
300 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
301
302 if (!test_and_set_bit(vector, &vpd->irr[0])) {
303 vcpu->arch.irq_new_pending = 1;
304 kvm_vcpu_kick(vcpu);
305 return 1;
306 }
307 return 0;
308}
309
/*
 * Deliver an IPI to the target vcpu.
 * dm:     SAPIC delivery mode.
 * vector: interrupt vector to deliver (rewritten for NMI/ExtINT).
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
			uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		break;
	case SAPIC_NMI:
		vector = 2;
		break;
	case SAPIC_EXTINT:
		vector = 0;
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented/reserved IPI delivery mode!\n");
		return;
	}
	__apic_accept_irq(vcpu, vector);
}
334
335static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
336 unsigned long eid)
337{
338 union ia64_lid lid;
339 int i;
340
Jes Sorensen934d5342009-01-21 15:16:43 +0100341 for (i = 0; i < kvm->arch.online_vcpus; i++) {
Xiantao Zhangb024b792008-04-01 15:29:29 +0800342 if (kvm->vcpus[i]) {
343 lid.val = VCPU_LID(kvm->vcpus[i]);
344 if (lid.id == id && lid.eid == eid)
345 return kvm->vcpus[i];
346 }
347 }
348
349 return NULL;
350}
351
352static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
353{
354 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
355 struct kvm_vcpu *target_vcpu;
356 struct kvm_pt_regs *regs;
357 union ia64_ipi_a addr = p->u.ipi_data.addr;
358 union ia64_ipi_d data = p->u.ipi_data.data;
359
360 target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
361 if (!target_vcpu)
362 return handle_vm_error(vcpu, kvm_run);
363
364 if (!target_vcpu->arch.launched) {
365 regs = vcpu_regs(target_vcpu);
366
367 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
368 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
369
Avi Kivitya4535292008-04-13 17:54:35 +0300370 target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800371 if (waitqueue_active(&target_vcpu->wq))
372 wake_up_interruptible(&target_vcpu->wq);
373 } else {
374 vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
375 if (target_vcpu != vcpu)
376 kvm_vcpu_kick(target_vcpu);
377 }
378
379 return 1;
380}
381
382struct call_data {
383 struct kvm_ptc_g ptc_g_data;
384 struct kvm_vcpu *vcpu;
385};
386
387static void vcpu_global_purge(void *info)
388{
389 struct call_data *p = (struct call_data *)info;
390 struct kvm_vcpu *vcpu = p->vcpu;
391
392 if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
393 return;
394
395 set_bit(KVM_REQ_PTC_G, &vcpu->requests);
396 if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
397 vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
398 p->ptc_g_data;
399 } else {
400 clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
401 vcpu->arch.ptc_g_count = 0;
402 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
403 }
404}
405
406static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
407{
408 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
409 struct kvm *kvm = vcpu->kvm;
410 struct call_data call_data;
411 int i;
Xiantao Zhangdecc9012008-10-16 15:58:15 +0800412
Xiantao Zhangb024b792008-04-01 15:29:29 +0800413 call_data.ptc_g_data = p->u.ptc_g_data;
414
Jes Sorensen934d5342009-01-21 15:16:43 +0100415 for (i = 0; i < kvm->arch.online_vcpus; i++) {
Xiantao Zhangb024b792008-04-01 15:29:29 +0800416 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
Avi Kivitya4535292008-04-13 17:54:35 +0300417 KVM_MP_STATE_UNINITIALIZED ||
Xiantao Zhangb024b792008-04-01 15:29:29 +0800418 vcpu == kvm->vcpus[i])
419 continue;
420
421 if (waitqueue_active(&kvm->vcpus[i]->wq))
422 wake_up_interruptible(&kvm->vcpus[i]->wq);
423
424 if (kvm->vcpus[i]->cpu != -1) {
425 call_data.vcpu = kvm->vcpus[i];
426 smp_call_function_single(kvm->vcpus[i]->cpu,
Takashi Iwai2f73cca2008-07-17 18:09:12 +0200427 vcpu_global_purge, &call_data, 1);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800428 } else
429 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
430
431 }
432 return 1;
433}
434
435static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
436{
437 return 1;
438}
439
Jes Sorensen0c72ea72009-02-25 10:38:52 -0600440static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
441{
442 unsigned long pte, rtc_phys_addr, map_addr;
443 int slot;
444
445 map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
446 rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
447 pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
448 slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
449 vcpu->arch.sn_rtc_tr_slot = slot;
450 if (slot < 0) {
451 printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
452 slot = 0;
453 }
454 return slot;
455}
456
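/*
 * Block a halted vcpu: with the in-kernel irqchip, work out how long until
 * the guest's next timer interrupt (vpd->itm) and sleep on an hrtimer for
 * that interval in the HALTED state, returning the vcpu to RUNNABLE when
 * the timer fires or an unhalt request arrives.
 */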
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{

	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
				kvm_cpu_has_pending_timer(vcpu))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!\n");
		return 0;
	}
}
504
505static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
506 struct kvm_run *kvm_run)
507{
508 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
509 return 0;
510}
511
512static int handle_external_interrupt(struct kvm_vcpu *vcpu,
513 struct kvm_run *kvm_run)
514{
515 return 1;
516}
517
Xiantao Zhang7d637972008-11-21 20:58:11 +0800518static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
519 struct kvm_run *kvm_run)
520{
521 printk("VMM: %s", vcpu->arch.log_buf);
522 return 1;
523}
524
Xiantao Zhangb024b792008-04-01 15:29:29 +0800525static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
526 struct kvm_run *kvm_run) = {
527 [EXIT_REASON_VM_PANIC] = handle_vm_error,
528 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
529 [EXIT_REASON_PAL_CALL] = handle_pal_call,
530 [EXIT_REASON_SAL_CALL] = handle_sal_call,
531 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
532 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
533 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
534 [EXIT_REASON_IPI] = handle_ipi,
535 [EXIT_REASON_PTC_G] = handle_global_purge,
Xiantao Zhang7d637972008-11-21 20:58:11 +0800536 [EXIT_REASON_DEBUG] = handle_vcpu_debug,
Xiantao Zhangb024b792008-04-01 15:29:29 +0800537
538};
539
540static const int kvm_vti_max_exit_handlers =
541 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
542
Xiantao Zhangb024b792008-04-01 15:29:29 +0800543static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
544{
545 struct exit_ctl_data *p_exit_data;
546
547 p_exit_data = kvm_get_exit_data(vcpu);
548 return p_exit_data->exit_reason;
549}
550
551/*
552 * The guest has exited. See if we can fix it or if we need userspace
553 * assistance.
554 */
555static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
556{
557 u32 exit_reason = kvm_get_exit_reason(vcpu);
558 vcpu->arch.last_exit = exit_reason;
559
560 if (exit_reason < kvm_vti_max_exit_handlers
561 && kvm_vti_exit_handlers[exit_reason])
562 return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
563 else {
564 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
565 kvm_run->hw.hardware_exit_reason = exit_reason;
566 }
567 return 0;
568}
569
570static inline void vti_set_rr6(unsigned long rr6)
571{
572 ia64_set_rr(RR6, rr6);
573 ia64_srlz_i();
574}
575
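/*
 * Pin the translations the VMM needs while it runs: one itr/dtr pair for
 * the VMM text and one for this VM's data area (plus the SN2 RTC page on
 * SN2 platforms).  They are purged again after the guest exits.
 */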
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/*Insert a pair of tr to map vmm*/
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/*Insert a pair of tr to map data of vm*/
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
				pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);
		if (r < 0)
			goto out;
	}
#endif

	r = 0;
out:
	return r;
}
608
609static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
610{
Jes Sorensen0c72ea72009-02-25 10:38:52 -0600611 struct kvm *kvm = vcpu->kvm;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800612 ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
613 ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
Jes Sorensen0c72ea72009-02-25 10:38:52 -0600614#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
615 if (kvm->arch.is_sn2)
616 ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
617#endif
Xiantao Zhangb024b792008-04-01 15:29:29 +0800618}
619
620static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
621{
622 int cpu = smp_processor_id();
623
624 if (vcpu->arch.last_run_cpu != cpu ||
625 per_cpu(last_vcpu, cpu) != vcpu) {
626 per_cpu(last_vcpu, cpu) = vcpu;
627 vcpu->arch.last_run_cpu = cpu;
628 kvm_flush_tlb_all();
629 }
630
631 vcpu->arch.host_rr6 = ia64_get_rr(RR6);
632 vti_set_rr6(vcpu->arch.vmm_rr);
633 return kvm_insert_vmm_mapping(vcpu);
634}
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200635
Xiantao Zhangb024b792008-04-01 15:29:29 +0800636static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
637{
638 kvm_purge_vmm_mapping(vcpu);
639 vti_set_rr6(vcpu->arch.host_rr6);
640}
641
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200642static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Xiantao Zhangb024b792008-04-01 15:29:29 +0800643{
644 union context *host_ctx, *guest_ctx;
645 int r;
646
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200647 /*
648 * down_read() may sleep and return with interrupts enabled
649 */
650 down_read(&vcpu->kvm->slots_lock);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800651
652again:
Xiantao Zhangb024b792008-04-01 15:29:29 +0800653 if (signal_pending(current)) {
Xiantao Zhangb024b792008-04-01 15:29:29 +0800654 r = -EINTR;
655 kvm_run->exit_reason = KVM_EXIT_INTR;
656 goto out;
657 }
658
Jes Sorensend24d2c12009-04-09 16:38:14 +0200659 preempt_disable();
660 local_irq_disable();
661
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200662 /*Get host and guest context with guest address space.*/
663 host_ctx = kvm_get_host_context(vcpu);
664 guest_ctx = kvm_get_guest_context(vcpu);
665
Xiantao Zhangb024b792008-04-01 15:29:29 +0800666 vcpu->guest_mode = 1;
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200667
668 r = kvm_vcpu_pre_transition(vcpu);
669 if (r < 0)
670 goto vcpu_run_fail;
671
672 up_read(&vcpu->kvm->slots_lock);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800673 kvm_guest_enter();
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200674
675 /*
676 * Transition to the guest
677 */
678 kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
679
680 kvm_vcpu_post_transition(vcpu);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800681
682 vcpu->arch.launched = 1;
683 vcpu->guest_mode = 0;
684 local_irq_enable();
685
686 /*
687 * We must have an instruction between local_irq_enable() and
688 * kvm_guest_exit(), so the timer interrupt isn't delayed by
689 * the interrupt shadow. The stat.exits increment will do nicely.
690 * But we need to prevent reordering, hence this barrier():
691 */
692 barrier();
Xiantao Zhangb024b792008-04-01 15:29:29 +0800693 kvm_guest_exit();
Xiantao Zhangb024b792008-04-01 15:29:29 +0800694 preempt_enable();
695
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200696 down_read(&vcpu->kvm->slots_lock);
697
Xiantao Zhangb024b792008-04-01 15:29:29 +0800698 r = kvm_handle_exit(kvm_run, vcpu);
699
700 if (r > 0) {
701 if (!need_resched())
702 goto again;
703 }
704
705out:
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200706 up_read(&vcpu->kvm->slots_lock);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800707 if (r > 0) {
708 kvm_resched(vcpu);
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200709 down_read(&vcpu->kvm->slots_lock);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800710 goto again;
711 }
712
713 return r;
Jes Sorensenc6b60c62009-04-16 10:43:48 +0200714
715vcpu_run_fail:
716 local_irq_enable();
717 preempt_enable();
718 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
719 goto out;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800720}
721
722static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
723{
724 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
725
726 if (!vcpu->mmio_is_write)
727 memcpy(&p->data, vcpu->mmio_data, 8);
728 p->state = STATE_IORESP_READY;
729}
730
731int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
732{
733 int r;
734 sigset_t sigsaved;
735
736 vcpu_load(vcpu);
737
Xiantao Zhanga2e4e282008-10-23 15:02:52 +0800738 if (vcpu->sigset_active)
739 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
740
Avi Kivitya4535292008-04-13 17:54:35 +0300741 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
Xiantao Zhangb024b792008-04-01 15:29:29 +0800742 kvm_vcpu_block(vcpu);
Xiantao Zhangdecc9012008-10-16 15:58:15 +0800743 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
Xiantao Zhanga2e4e282008-10-23 15:02:52 +0800744 r = -EAGAIN;
745 goto out;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800746 }
747
Xiantao Zhangb024b792008-04-01 15:29:29 +0800748 if (vcpu->mmio_needed) {
749 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
750 kvm_set_mmio_data(vcpu);
751 vcpu->mmio_read_completed = 1;
752 vcpu->mmio_needed = 0;
753 }
754 r = __vcpu_run(vcpu, kvm_run);
Xiantao Zhanga2e4e282008-10-23 15:02:52 +0800755out:
Xiantao Zhangb024b792008-04-01 15:29:29 +0800756 if (vcpu->sigset_active)
757 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
758
759 vcpu_put(vcpu);
760 return r;
761}
762
Xiantao Zhangb024b792008-04-01 15:29:29 +0800763static struct kvm *kvm_alloc_kvm(void)
764{
765
766 struct kvm *kvm;
767 uint64_t vm_base;
768
Xiantao Zhanga917f7af32008-10-23 14:56:44 +0800769 BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
770
Xiantao Zhangb024b792008-04-01 15:29:29 +0800771 vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
772
773 if (!vm_base)
774 return ERR_PTR(-ENOMEM);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800775
Xiantao Zhangb024b792008-04-01 15:29:29 +0800776 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
Xiantao Zhanga917f7af32008-10-23 14:56:44 +0800777 kvm = (struct kvm *)(vm_base +
778 offsetof(struct kvm_vm_data, kvm_vm_struct));
Xiantao Zhangb024b792008-04-01 15:29:29 +0800779 kvm->arch.vm_base = vm_base;
Xiantao Zhanga917f7af32008-10-23 14:56:44 +0800780 printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800781
782 return kvm;
783}
784
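/*
 * Guest physical ranges that are I/O rather than RAM.  kvm_build_io_pmt()
 * marks each page of these windows with its GPFN type in the per-VM
 * page-map table so the VMM treats accesses to them as I/O.
 */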
struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
			j < io_ranges[i].start + io_ranges[i].size;
			j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}

}
814
815/*Use unused rids to virtualize guest rid.*/
816#define GUEST_PHYSICAL_RR0 0x1739
817#define GUEST_PHYSICAL_RR4 0x2739
818#define VMM_INIT_RR 0x1660
819
820static void kvm_init_vm(struct kvm *kvm)
821{
Xiantao Zhangb024b792008-04-01 15:29:29 +0800822 BUG_ON(!kvm);
823
824 kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
825 kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
826 kvm->arch.vmm_init_rr = VMM_INIT_RR;
827
Xiantao Zhangb024b792008-04-01 15:29:29 +0800828 /*
829 *Fill P2M entries for MMIO/IO ranges
830 */
831 kvm_build_io_pmt(kvm);
832
Xiantao Zhang2381ad22008-10-08 08:29:33 +0800833 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
Sheng Yang5550af42008-10-15 20:15:06 +0800834
835 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
836 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800837}
838
839struct kvm *kvm_arch_create_vm(void)
840{
841 struct kvm *kvm = kvm_alloc_kvm();
842
843 if (IS_ERR(kvm))
844 return ERR_PTR(-ENOMEM);
Jes Sorensen0c72ea72009-02-25 10:38:52 -0600845
846 kvm->arch.is_sn2 = ia64_platform_is("sn2");
847
Xiantao Zhangb024b792008-04-01 15:29:29 +0800848 kvm_init_vm(kvm);
849
Jes Sorensen934d5342009-01-21 15:16:43 +0100850 kvm->arch.online_vcpus = 0;
851
Xiantao Zhangb024b792008-04-01 15:29:29 +0800852 return kvm;
853
854}
855
856static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
857 struct kvm_irqchip *chip)
858{
859 int r;
860
861 r = 0;
862 switch (chip->chip_id) {
863 case KVM_IRQCHIP_IOAPIC:
864 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
865 sizeof(struct kvm_ioapic_state));
866 break;
867 default:
868 r = -EINVAL;
869 break;
870 }
871 return r;
872}
873
874static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
875{
876 int r;
877
878 r = 0;
879 switch (chip->chip_id) {
880 case KVM_IRQCHIP_IOAPIC:
881 memcpy(ioapic_irqchip(kvm),
882 &chip->chip.ioapic,
883 sizeof(struct kvm_ioapic_state));
884 break;
885 default:
886 r = -EINVAL;
887 break;
888 }
889 return r;
890}
891
892#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
893
894int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
895{
Xiantao Zhangb024b792008-04-01 15:29:29 +0800896 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
Jes Sorensen042b26e2008-12-16 16:45:47 +0100897 int i;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800898
899 vcpu_load(vcpu);
900
901 for (i = 0; i < 16; i++) {
902 vpd->vgr[i] = regs->vpd.vgr[i];
903 vpd->vbgr[i] = regs->vpd.vbgr[i];
904 }
905 for (i = 0; i < 128; i++)
906 vpd->vcr[i] = regs->vpd.vcr[i];
907 vpd->vhpi = regs->vpd.vhpi;
908 vpd->vnat = regs->vpd.vnat;
909 vpd->vbnat = regs->vpd.vbnat;
910 vpd->vpsr = regs->vpd.vpsr;
911
912 vpd->vpr = regs->vpd.vpr;
913
Jes Sorensen042b26e2008-12-16 16:45:47 +0100914 memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
Xiantao Zhangb024b792008-04-01 15:29:29 +0800915
916 RESTORE_REGS(mp_state);
917 RESTORE_REGS(vmm_rr);
918 memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
919 memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
920 RESTORE_REGS(itr_regions);
921 RESTORE_REGS(dtr_regions);
922 RESTORE_REGS(tc_regions);
923 RESTORE_REGS(irq_check);
924 RESTORE_REGS(itc_check);
925 RESTORE_REGS(timer_check);
926 RESTORE_REGS(timer_pending);
927 RESTORE_REGS(last_itc);
928 for (i = 0; i < 8; i++) {
929 vcpu->arch.vrr[i] = regs->vrr[i];
930 vcpu->arch.ibr[i] = regs->ibr[i];
931 vcpu->arch.dbr[i] = regs->dbr[i];
932 }
933 for (i = 0; i < 4; i++)
934 vcpu->arch.insvc[i] = regs->insvc[i];
935 RESTORE_REGS(xtp);
936 RESTORE_REGS(metaphysical_rr0);
937 RESTORE_REGS(metaphysical_rr4);
938 RESTORE_REGS(metaphysical_saved_rr0);
939 RESTORE_REGS(metaphysical_saved_rr4);
940 RESTORE_REGS(fp_psr);
941 RESTORE_REGS(saved_gp);
942
943 vcpu->arch.irq_new_pending = 1;
Jes Sorensenc6c9fcd2009-02-25 10:38:53 -0600944 vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
Xiantao Zhangb024b792008-04-01 15:29:29 +0800945 set_bit(KVM_REQ_RESUME, &vcpu->requests);
946
947 vcpu_put(vcpu);
Jes Sorensen042b26e2008-12-16 16:45:47 +0100948
949 return 0;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800950}
951
952long kvm_arch_vm_ioctl(struct file *filp,
953 unsigned int ioctl, unsigned long arg)
954{
955 struct kvm *kvm = filp->private_data;
956 void __user *argp = (void __user *)arg;
957 int r = -EINVAL;
958
959 switch (ioctl) {
960 case KVM_SET_MEMORY_REGION: {
961 struct kvm_memory_region kvm_mem;
962 struct kvm_userspace_memory_region kvm_userspace_mem;
963
964 r = -EFAULT;
965 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
966 goto out;
967 kvm_userspace_mem.slot = kvm_mem.slot;
968 kvm_userspace_mem.flags = kvm_mem.flags;
969 kvm_userspace_mem.guest_phys_addr =
970 kvm_mem.guest_phys_addr;
971 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
972 r = kvm_vm_ioctl_set_memory_region(kvm,
973 &kvm_userspace_mem, 0);
974 if (r)
975 goto out;
976 break;
977 }
978 case KVM_CREATE_IRQCHIP:
979 r = -EFAULT;
980 r = kvm_ioapic_init(kvm);
981 if (r)
982 goto out;
Avi Kivity399ec802008-11-19 13:58:46 +0200983 r = kvm_setup_default_irq_routing(kvm);
984 if (r) {
985 kfree(kvm->arch.vioapic);
986 goto out;
987 }
Xiantao Zhangb024b792008-04-01 15:29:29 +0800988 break;
Gleb Natapov49256632009-02-04 17:28:14 +0200989 case KVM_IRQ_LINE_STATUS:
Xiantao Zhangb024b792008-04-01 15:29:29 +0800990 case KVM_IRQ_LINE: {
991 struct kvm_irq_level irq_event;
992
993 r = -EFAULT;
994 if (copy_from_user(&irq_event, argp, sizeof irq_event))
995 goto out;
996 if (irqchip_in_kernel(kvm)) {
Gleb Natapov49256632009-02-04 17:28:14 +0200997 __s32 status;
Xiantao Zhangb024b792008-04-01 15:29:29 +0800998 mutex_lock(&kvm->lock);
Gleb Natapov49256632009-02-04 17:28:14 +0200999 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
Sheng Yang5550af42008-10-15 20:15:06 +08001000 irq_event.irq, irq_event.level);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001001 mutex_unlock(&kvm->lock);
Gleb Natapov49256632009-02-04 17:28:14 +02001002 if (ioctl == KVM_IRQ_LINE_STATUS) {
1003 irq_event.status = status;
1004 if (copy_to_user(argp, &irq_event,
1005 sizeof irq_event))
1006 goto out;
1007 }
Xiantao Zhangb024b792008-04-01 15:29:29 +08001008 r = 0;
1009 }
1010 break;
1011 }
1012 case KVM_GET_IRQCHIP: {
1013 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1014 struct kvm_irqchip chip;
1015
1016 r = -EFAULT;
1017 if (copy_from_user(&chip, argp, sizeof chip))
1018 goto out;
1019 r = -ENXIO;
1020 if (!irqchip_in_kernel(kvm))
1021 goto out;
1022 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1023 if (r)
1024 goto out;
1025 r = -EFAULT;
1026 if (copy_to_user(argp, &chip, sizeof chip))
1027 goto out;
1028 r = 0;
1029 break;
1030 }
1031 case KVM_SET_IRQCHIP: {
1032 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1033 struct kvm_irqchip chip;
1034
1035 r = -EFAULT;
1036 if (copy_from_user(&chip, argp, sizeof chip))
1037 goto out;
1038 r = -ENXIO;
1039 if (!irqchip_in_kernel(kvm))
1040 goto out;
1041 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1042 if (r)
1043 goto out;
1044 r = 0;
1045 break;
1046 }
1047 default:
1048 ;
1049 }
1050out:
1051 return r;
1052}
1053
1054int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1055 struct kvm_sregs *sregs)
1056{
1057 return -EINVAL;
1058}
1059
1060int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1061 struct kvm_sregs *sregs)
1062{
1063 return -EINVAL;
1064
1065}
1066int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1067 struct kvm_translation *tr)
1068{
1069
1070 return -EINVAL;
1071}
1072
1073static int kvm_alloc_vmm_area(void)
1074{
1075 if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
1076 kvm_vmm_base = __get_free_pages(GFP_KERNEL,
1077 get_order(KVM_VMM_SIZE));
1078 if (!kvm_vmm_base)
1079 return -ENOMEM;
1080
1081 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1082 kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
1083
1084 printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1085 kvm_vmm_base, kvm_vm_buffer);
1086 }
1087
1088 return 0;
1089}
1090
static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/*Zero this area before freeing it to avoid leaking stale VMM state*/
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}
1102
Xiantao Zhangb024b792008-04-01 15:29:29 +08001103static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1104{
1105}
1106
1107static int vti_init_vpd(struct kvm_vcpu *vcpu)
1108{
1109 int i;
1110 union cpuid3_t cpuid3;
1111 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1112
1113 if (IS_ERR(vpd))
1114 return PTR_ERR(vpd);
1115
1116 /* CPUID init */
1117 for (i = 0; i < 5; i++)
1118 vpd->vcpuid[i] = ia64_get_cpuid(i);
1119
1120 /* Limit the CPUID number to 5 */
1121 cpuid3.value = vpd->vcpuid[3];
1122 cpuid3.number = 4; /* 5 - 1 */
1123 vpd->vcpuid[3] = cpuid3.value;
1124
1125 /*Set vac and vdc fields*/
1126 vpd->vac.a_from_int_cr = 1;
1127 vpd->vac.a_to_int_cr = 1;
1128 vpd->vac.a_from_psr = 1;
1129 vpd->vac.a_from_cpuid = 1;
1130 vpd->vac.a_cover = 1;
1131 vpd->vac.a_bsw = 1;
1132 vpd->vac.a_int = 1;
1133 vpd->vdc.d_vmsw = 1;
1134
1135 /*Set virtual buffer*/
1136 vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
1137
1138 return 0;
1139}
1140
1141static int vti_create_vp(struct kvm_vcpu *vcpu)
1142{
1143 long ret;
1144 struct vpd *vpd = vcpu->arch.vpd;
1145 unsigned long vmm_ivt;
1146
1147 vmm_ivt = kvm_vmm_info->vmm_ivt;
1148
1149 printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
1150
1151 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
1152
1153 if (ret) {
1154 printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
1155 return -EINVAL;
1156 }
1157 return 0;
1158}
1159
1160static void init_ptce_info(struct kvm_vcpu *vcpu)
1161{
1162 ia64_ptce_info_t ptce = {0};
1163
1164 ia64_get_ptce(&ptce);
1165 vcpu->arch.ptce_base = ptce.base;
1166 vcpu->arch.ptce_count[0] = ptce.count[0];
1167 vcpu->arch.ptce_count[1] = ptce.count[1];
1168 vcpu->arch.ptce_stride[0] = ptce.stride[0];
1169 vcpu->arch.ptce_stride[1] = ptce.stride[1];
1170}
1171
1172static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
1173{
1174 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
1175
1176 if (hrtimer_cancel(p_ht))
Arjan van de Ven18dd36a2008-09-01 15:19:11 -07001177 hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001178}
1179
1180static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
1181{
1182 struct kvm_vcpu *vcpu;
1183 wait_queue_head_t *q;
1184
1185 vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001186 q = &vcpu->wq;
1187
Avi Kivitya4535292008-04-13 17:54:35 +03001188 if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001189 goto out;
1190
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001191 if (waitqueue_active(q))
Xiantao Zhangb024b792008-04-01 15:29:29 +08001192 wake_up_interruptible(q);
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001193
Xiantao Zhangb024b792008-04-01 15:29:29 +08001194out:
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001195 vcpu->arch.timer_fired = 1;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001196 vcpu->arch.timer_check = 1;
1197 return HRTIMER_NORESTART;
1198}
1199
1200#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
1201
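/*
 * First-time vcpu setup: vcpu 0 starts RUNNABLE at the PAL reset entry and
 * seeds the ITC offset for every vcpu; the others remain UNINITIALIZED until
 * they are started by an IPI.  The VMM-side register context, the halt timer
 * and the per-vcpu VPD/VHPT/VTLB pointers inside the VM data area are set up
 * here as well.
 */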
1202int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1203{
1204 struct kvm_vcpu *v;
1205 int r;
1206 int i;
1207 long itc_offset;
1208 struct kvm *kvm = vcpu->kvm;
1209 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1210
1211 union context *p_ctx = &vcpu->arch.guest;
1212 struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
1213
1214 /*Init vcpu context for first run.*/
1215 if (IS_ERR(vmm_vcpu))
1216 return PTR_ERR(vmm_vcpu);
1217
1218 if (vcpu->vcpu_id == 0) {
Avi Kivitya4535292008-04-13 17:54:35 +03001219 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001220
1221 /*Set entry address for first run.*/
1222 regs->cr_iip = PALE_RESET_ENTRY;
1223
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001224 /*Initialize itc offset for vcpus*/
Jes Sorensenc6c9fcd2009-02-25 10:38:53 -06001225 itc_offset = 0UL - kvm_get_itc(vcpu);
Jes Sorensen934d5342009-01-21 15:16:43 +01001226 for (i = 0; i < kvm->arch.online_vcpus; i++) {
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001227 v = (struct kvm_vcpu *)((char *)vcpu +
1228 sizeof(struct kvm_vcpu_data) * i);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001229 v->arch.itc_offset = itc_offset;
1230 v->arch.last_itc = 0;
1231 }
1232 } else
Avi Kivitya4535292008-04-13 17:54:35 +03001233 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001234
1235 r = -ENOMEM;
1236 vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
1237 if (!vcpu->arch.apic)
1238 goto out;
1239 vcpu->arch.apic->vcpu = vcpu;
1240
1241 p_ctx->gr[1] = 0;
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001242 p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001243 p_ctx->gr[13] = (unsigned long)vmm_vcpu;
1244 p_ctx->psr = 0x1008522000UL;
1245 p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
1246 p_ctx->caller_unat = 0;
1247 p_ctx->pr = 0x0;
1248 p_ctx->ar[36] = 0x0; /*unat*/
1249 p_ctx->ar[19] = 0x0; /*rnat*/
1250 p_ctx->ar[18] = (unsigned long)vmm_vcpu +
1251 ((sizeof(struct kvm_vcpu)+15) & ~15);
1252 p_ctx->ar[64] = 0x0; /*pfs*/
1253 p_ctx->cr[0] = 0x7e04UL;
1254 p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
1255 p_ctx->cr[8] = 0x3c;
1256
	/*Initialize region registers*/
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/*Initialize branch register 0*/
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
1268
1269 vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
1270 vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
1271 vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
1272
1273 hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1274 vcpu->arch.hlt_timer.function = hlt_timer_fn;
1275
1276 vcpu->arch.last_run_cpu = -1;
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001277 vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001278 vcpu->arch.vsa_base = kvm_vsa_base;
1279 vcpu->arch.__gp = kvm_vmm_gp;
1280 vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001281 vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
1282 vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001283 init_ptce_info(vcpu);
1284
1285 r = 0;
1286out:
1287 return r;
1288}
1289
1290static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
1291{
1292 unsigned long psr;
1293 int r;
1294
1295 local_irq_save(psr);
1296 r = kvm_insert_vmm_mapping(vcpu);
1297 if (r)
1298 goto fail;
1299 r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
1300 if (r)
1301 goto fail;
1302
1303 r = vti_init_vpd(vcpu);
1304 if (r) {
1305 printk(KERN_DEBUG"kvm: vpd init error!!\n");
1306 goto uninit;
1307 }
1308
1309 r = vti_create_vp(vcpu);
1310 if (r)
1311 goto uninit;
1312
1313 kvm_purge_vmm_mapping(vcpu);
1314 local_irq_restore(psr);
1315
1316 return 0;
1317uninit:
1318 kvm_vcpu_uninit(vcpu);
1319fail:
Julia Lawallcab7a1e2008-07-22 21:38:18 +02001320 local_irq_restore(psr);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001321 return r;
1322}
1323
1324struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1325 unsigned int id)
1326{
1327 struct kvm_vcpu *vcpu;
1328 unsigned long vm_base = kvm->arch.vm_base;
1329 int r;
1330 int cpu;
1331
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001332 BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
1333
1334 r = -EINVAL;
1335 if (id >= KVM_MAX_VCPUS) {
1336 printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
1337 KVM_MAX_VCPUS);
1338 goto fail;
1339 }
1340
Xiantao Zhangb024b792008-04-01 15:29:29 +08001341 r = -ENOMEM;
1342 if (!vm_base) {
1343 printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
1344 goto fail;
1345 }
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001346 vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
1347 vcpu_data[id].vcpu_struct));
Xiantao Zhangb024b792008-04-01 15:29:29 +08001348 vcpu->kvm = kvm;
1349
1350 cpu = get_cpu();
1351 vti_vcpu_load(vcpu, cpu);
1352 r = vti_vcpu_setup(vcpu, id);
1353 put_cpu();
1354
1355 if (r) {
1356 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1357 goto fail;
1358 }
1359
Jes Sorensen934d5342009-01-21 15:16:43 +01001360 kvm->arch.online_vcpus++;
1361
Xiantao Zhangb024b792008-04-01 15:29:29 +08001362 return vcpu;
1363fail:
1364 return ERR_PTR(r);
1365}
1366
1367int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1368{
1369 return 0;
1370}
1371
1372int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1373{
1374 return -EINVAL;
1375}
1376
1377int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1378{
1379 return -EINVAL;
1380}
1381
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001382int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1383 struct kvm_guest_debug *dbg)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001384{
1385 return -EINVAL;
1386}
1387
1388static void free_kvm(struct kvm *kvm)
1389{
1390 unsigned long vm_base = kvm->arch.vm_base;
1391
1392 if (vm_base) {
1393 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
1394 free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
1395 }
1396
1397}
1398
1399static void kvm_release_vm_pages(struct kvm *kvm)
1400{
1401 struct kvm_memory_slot *memslot;
1402 int i, j;
1403 unsigned long base_gfn;
1404
1405 for (i = 0; i < kvm->nmemslots; i++) {
1406 memslot = &kvm->memslots[i];
1407 base_gfn = memslot->base_gfn;
1408
1409 for (j = 0; j < memslot->npages; j++) {
1410 if (memslot->rmap[j])
1411 put_page((struct page *)memslot->rmap[j]);
1412 }
1413 }
1414}
1415
Sheng Yangad8ba2c2009-01-06 10:03:02 +08001416void kvm_arch_sync_events(struct kvm *kvm)
1417{
1418}
1419
Xiantao Zhangb024b792008-04-01 15:29:29 +08001420void kvm_arch_destroy_vm(struct kvm *kvm)
1421{
Xiantao Zhang2381ad22008-10-08 08:29:33 +08001422 kvm_iommu_unmap_guest(kvm);
1423#ifdef KVM_CAP_DEVICE_ASSIGNMENT
1424 kvm_free_all_assigned_devices(kvm);
1425#endif
Xiantao Zhangb024b792008-04-01 15:29:29 +08001426 kfree(kvm->arch.vioapic);
1427 kvm_release_vm_pages(kvm);
1428 kvm_free_physmem(kvm);
1429 free_kvm(kvm);
1430}
1431
1432void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1433{
1434}
1435
1436void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1437{
1438 if (cpu != vcpu->cpu) {
1439 vcpu->cpu = cpu;
1440 if (vcpu->arch.ht_active)
1441 kvm_migrate_hlt_timer(vcpu);
1442 }
1443}
1444
1445#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
1446
1447int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1448{
Xiantao Zhangb024b792008-04-01 15:29:29 +08001449 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
Jes Sorensen042b26e2008-12-16 16:45:47 +01001450 int i;
1451
Xiantao Zhangb024b792008-04-01 15:29:29 +08001452 vcpu_load(vcpu);
1453
1454 for (i = 0; i < 16; i++) {
1455 regs->vpd.vgr[i] = vpd->vgr[i];
1456 regs->vpd.vbgr[i] = vpd->vbgr[i];
1457 }
1458 for (i = 0; i < 128; i++)
1459 regs->vpd.vcr[i] = vpd->vcr[i];
1460 regs->vpd.vhpi = vpd->vhpi;
1461 regs->vpd.vnat = vpd->vnat;
1462 regs->vpd.vbnat = vpd->vbnat;
1463 regs->vpd.vpsr = vpd->vpsr;
1464 regs->vpd.vpr = vpd->vpr;
1465
Jes Sorensen042b26e2008-12-16 16:45:47 +01001466 memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
1467
Xiantao Zhangb024b792008-04-01 15:29:29 +08001468 SAVE_REGS(mp_state);
1469 SAVE_REGS(vmm_rr);
1470 memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
1471 memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
1472 SAVE_REGS(itr_regions);
1473 SAVE_REGS(dtr_regions);
1474 SAVE_REGS(tc_regions);
1475 SAVE_REGS(irq_check);
1476 SAVE_REGS(itc_check);
1477 SAVE_REGS(timer_check);
1478 SAVE_REGS(timer_pending);
1479 SAVE_REGS(last_itc);
1480 for (i = 0; i < 8; i++) {
1481 regs->vrr[i] = vcpu->arch.vrr[i];
1482 regs->ibr[i] = vcpu->arch.ibr[i];
1483 regs->dbr[i] = vcpu->arch.dbr[i];
1484 }
1485 for (i = 0; i < 4; i++)
1486 regs->insvc[i] = vcpu->arch.insvc[i];
Jes Sorensenc6c9fcd2009-02-25 10:38:53 -06001487 regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001488 SAVE_REGS(xtp);
1489 SAVE_REGS(metaphysical_rr0);
1490 SAVE_REGS(metaphysical_rr4);
1491 SAVE_REGS(metaphysical_saved_rr0);
1492 SAVE_REGS(metaphysical_saved_rr4);
1493 SAVE_REGS(fp_psr);
1494 SAVE_REGS(saved_gp);
Jes Sorensen042b26e2008-12-16 16:45:47 +01001495
Xiantao Zhangb024b792008-04-01 15:29:29 +08001496 vcpu_put(vcpu);
Jes Sorensen042b26e2008-12-16 16:45:47 +01001497 return 0;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001498}
1499
Jes Sorensene9a999f2008-12-18 12:17:51 +01001500int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
1501 struct kvm_ia64_vcpu_stack *stack)
1502{
1503 memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
1504 return 0;
1505}
1506
1507int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
1508 struct kvm_ia64_vcpu_stack *stack)
1509{
1510 memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
1511 sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
1512
1513 vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
1514 return 0;
1515}
1516
Xiantao Zhangb024b792008-04-01 15:29:29 +08001517void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1518{
1519
1520 hrtimer_cancel(&vcpu->arch.hlt_timer);
1521 kfree(vcpu->arch.apic);
1522}
1523
1524
1525long kvm_arch_vcpu_ioctl(struct file *filp,
Jes Sorensene9a999f2008-12-18 12:17:51 +01001526 unsigned int ioctl, unsigned long arg)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001527{
Jes Sorensene9a999f2008-12-18 12:17:51 +01001528 struct kvm_vcpu *vcpu = filp->private_data;
1529 void __user *argp = (void __user *)arg;
1530 struct kvm_ia64_vcpu_stack *stack = NULL;
1531 long r;
1532
1533 switch (ioctl) {
1534 case KVM_IA64_VCPU_GET_STACK: {
1535 struct kvm_ia64_vcpu_stack __user *user_stack;
1536 void __user *first_p = argp;
1537
1538 r = -EFAULT;
1539 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1540 goto out;
1541
1542 if (!access_ok(VERIFY_WRITE, user_stack,
1543 sizeof(struct kvm_ia64_vcpu_stack))) {
1544 printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
1545 "Illegal user destination address for stack\n");
1546 goto out;
1547 }
1548 stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1549 if (!stack) {
1550 r = -ENOMEM;
1551 goto out;
1552 }
1553
1554 r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
1555 if (r)
1556 goto out;
1557
1558 if (copy_to_user(user_stack, stack,
1559 sizeof(struct kvm_ia64_vcpu_stack)))
1560 goto out;
1561
1562 break;
1563 }
1564 case KVM_IA64_VCPU_SET_STACK: {
1565 struct kvm_ia64_vcpu_stack __user *user_stack;
1566 void __user *first_p = argp;
1567
1568 r = -EFAULT;
1569 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1570 goto out;
1571
1572 if (!access_ok(VERIFY_READ, user_stack,
1573 sizeof(struct kvm_ia64_vcpu_stack))) {
1574 printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
1575 "Illegal user address for stack\n");
1576 goto out;
1577 }
1578 stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1579 if (!stack) {
1580 r = -ENOMEM;
1581 goto out;
1582 }
1583 if (copy_from_user(stack, user_stack,
1584 sizeof(struct kvm_ia64_vcpu_stack)))
1585 goto out;
1586
1587 r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
1588 break;
1589 }
1590
1591 default:
1592 r = -EINVAL;
1593 }
1594
1595out:
1596 kfree(stack);
1597 return r;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001598}
1599
1600int kvm_arch_set_memory_region(struct kvm *kvm,
1601 struct kvm_userspace_memory_region *mem,
1602 struct kvm_memory_slot old,
1603 int user_alloc)
1604{
1605 unsigned long i;
Xiantao Zhang1cbea802008-10-03 14:58:09 +08001606 unsigned long pfn;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001607 int npages = mem->memory_size >> PAGE_SHIFT;
1608 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1609 unsigned long base_gfn = memslot->base_gfn;
1610
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001611 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
1612 return -ENOMEM;
1613
Xiantao Zhangb024b792008-04-01 15:29:29 +08001614 for (i = 0; i < npages; i++) {
Xiantao Zhang1cbea802008-10-03 14:58:09 +08001615 pfn = gfn_to_pfn(kvm, base_gfn + i);
1616 if (!kvm_is_mmio_pfn(pfn)) {
1617 kvm_set_pmt_entry(kvm, base_gfn + i,
1618 pfn << PAGE_SHIFT,
Xiantao Zhangb010eb52008-09-28 01:39:46 -07001619 _PAGE_AR_RWX | _PAGE_MA_WB);
Xiantao Zhang1cbea802008-10-03 14:58:09 +08001620 memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
1621 } else {
1622 kvm_set_pmt_entry(kvm, base_gfn + i,
Xiantao Zhangb010eb52008-09-28 01:39:46 -07001623 GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
Xiantao Zhang1cbea802008-10-03 14:58:09 +08001624 _PAGE_MA_UC);
1625 memslot->rmap[i] = 0;
1626 }
Xiantao Zhangb024b792008-04-01 15:29:29 +08001627 }
1628
1629 return 0;
1630}
1631
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03001632void kvm_arch_flush_shadow(struct kvm *kvm)
1633{
1634}
Xiantao Zhangb024b792008-04-01 15:29:29 +08001635
1636long kvm_arch_dev_ioctl(struct file *filp,
Jes Sorensene9a999f2008-12-18 12:17:51 +01001637 unsigned int ioctl, unsigned long arg)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001638{
1639 return -EINVAL;
1640}
1641
1642void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1643{
1644 kvm_vcpu_uninit(vcpu);
1645}
1646
1647static int vti_cpu_has_kvm_support(void)
1648{
1649 long avail = 1, status = 1, control = 1;
1650 long ret;
1651
1652 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1653 if (ret)
1654 goto out;
1655
1656 if (!(avail & PAL_PROC_VM_BIT))
1657 goto out;
1658
1659 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1660
1661 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1662 if (ret)
1663 goto out;
1664 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1665
1666 if (!(vp_env_info & VP_OPCODE)) {
1667 printk(KERN_WARNING"kvm: No opcode ability on hardware, "
1668 "vm_env_info:0x%lx\n", vp_env_info);
1669 }
1670
1671 return 1;
1672out:
1673 return 0;
1674}
1675
Jes Sorensen0b5d7a22009-02-25 10:38:55 -06001676
/*
 * On SN2, the ITC isn't stable, so copy in fast path code to use the
 * SN2 RTC, replacing the ITC based default version.
 */
1681static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
1682 struct module *module)
1683{
1684 unsigned long new_ar, new_ar_sn2;
1685 unsigned long module_base;
1686
1687 if (!ia64_platform_is("sn2"))
1688 return;
1689
1690 module_base = (unsigned long)module->module_core;
1691
1692 new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
1693 new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
1694
1695 printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
1696 "as source\n");
1697
1698 /*
1699 * Copy the SN2 version of mov_ar into place. They are both
1700 * the same size, so 6 bundles is sufficient (6 * 0x10).
1701 */
1702 memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
1703}
1704
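/*
 * Copy the relocatable VMM module text into the pinned kvm_vmm_base area,
 * flush the icache, and rewrite the IVT address plus the entry and
 * trampoline function descriptors so they point at the KVM_VMM_BASE mapping.
 */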
Xiantao Zhangb024b792008-04-01 15:29:29 +08001705static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
Jes Sorensen0b5d7a22009-02-25 10:38:55 -06001706 struct module *module)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001707{
1708 unsigned long module_base;
1709 unsigned long vmm_size;
1710
1711 unsigned long vmm_offset, func_offset, fdesc_offset;
1712 struct fdesc *p_fdesc;
1713
1714 BUG_ON(!module);
1715
1716 if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!\n");
1718 return -EFAULT;
1719 }
1720
1721 /*Calculate new position of relocated vmm module.*/
1722 module_base = (unsigned long)module->module_core;
1723 vmm_size = module->core_size;
1724 if (unlikely(vmm_size > KVM_VMM_SIZE))
1725 return -EFAULT;
1726
1727 memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
Jes Sorensen0b5d7a22009-02-25 10:38:55 -06001728 kvm_patch_vmm(vmm_info, module);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001729 kvm_flush_icache(kvm_vmm_base, vmm_size);
1730
1731 /*Recalculate kvm_vmm_info based on new VMM*/
1732 vmm_offset = vmm_info->vmm_ivt - module_base;
1733 kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
1734 printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
1735 kvm_vmm_info->vmm_ivt);
1736
1737 fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
1738 kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
1739 fdesc_offset);
1740 func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
1741 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1742 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1743 p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
1744
1745 printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
1746 KVM_VMM_BASE+func_offset);
1747
1748 fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
1749 kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
1750 fdesc_offset);
1751 func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
1752 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1753 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1754 p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
1755
1756 kvm_vmm_gp = p_fdesc->gp;
1757
1758 printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
1759 kvm_vmm_info->vmm_entry);
1760 printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
1761 KVM_VMM_BASE + func_offset);
1762
1763 return 0;
1764}
1765
1766int kvm_arch_init(void *opaque)
1767{
1768 int r;
1769 struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
1770
1771 if (!vti_cpu_has_kvm_support()) {
1772 printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
1773 r = -EOPNOTSUPP;
1774 goto out;
1775 }
1776
1777 if (kvm_vmm_info) {
1778 printk(KERN_ERR "kvm: Already loaded VMM module!\n");
1779 r = -EEXIST;
1780 goto out;
1781 }
1782
1783 r = -ENOMEM;
1784 kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
1785 if (!kvm_vmm_info)
1786 goto out;
1787
1788 if (kvm_alloc_vmm_area())
1789 goto out_free0;
1790
1791 r = kvm_relocate_vmm(vmm_info, vmm_info->module);
1792 if (r)
1793 goto out_free1;
1794
1795 return 0;
1796
1797out_free1:
1798 kvm_free_vmm_area();
1799out_free0:
1800 kfree(kvm_vmm_info);
1801out:
1802 return r;
1803}
1804
1805void kvm_arch_exit(void)
1806{
1807 kvm_free_vmm_area();
1808 kfree(kvm_vmm_info);
1809 kvm_vmm_info = NULL;
1810}
1811
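/*
 * Pull the dirty-page bits for @log->slot out of the bitmap that the
 * VMM maintains in the per-VM data area (kvm_mem_dirty_log) and copy
 * them into the memslot's dirty_bitmap, clearing the source bits as
 * they are read.
 */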
1812static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1813 struct kvm_dirty_log *log)
1814{
1815 struct kvm_memory_slot *memslot;
1816 int r, i;
1817 long n, base;
Xiantao Zhanga917f7af32008-10-23 14:56:44 +08001818 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
1819 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
Xiantao Zhangb024b792008-04-01 15:29:29 +08001820
1821 r = -EINVAL;
1822 if (log->slot >= KVM_MEMORY_SLOTS)
1823 goto out;
1824
1825 memslot = &kvm->memslots[log->slot];
1826 r = -ENOENT;
1827 if (!memslot->dirty_bitmap)
1828 goto out;
1829
1830 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1831 base = memslot->base_gfn / BITS_PER_LONG;
1832
1833 for (i = 0; i < n/sizeof(long); ++i) {
1834 memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
1835 dirty_bitmap[base + i] = 0;
1836 }
1837 r = 0;
1838out:
1839 return r;
1840}
1841
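/*
 * KVM_GET_DIRTY_LOG ioctl: under dirty_log_lock, sync the VMM's dirty
 * bitmap into the memslot, hand it to the generic kvm_get_dirty_log(),
 * and if any pages were dirty flush remote TLBs and clear the bitmap.
 */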
1842int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1843 struct kvm_dirty_log *log)
1844{
1845 int r;
1846 int n;
1847 struct kvm_memory_slot *memslot;
1848 int is_dirty = 0;
1849
1850 spin_lock(&kvm->arch.dirty_log_lock);
1851
1852 r = kvm_ia64_sync_dirty_log(kvm, log);
1853 if (r)
1854 goto out;
1855
1856 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1857 if (r)
1858 goto out;
1859
1860 /* If nothing is dirty, don't bother messing with page tables. */
1861 if (is_dirty) {
1862 kvm_flush_remote_tlbs(kvm);
1863 memslot = &kvm->memslots[log->slot];
1864 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1865 memset(memslot->dirty_bitmap, 0, n);
1866 }
1867 r = 0;
1868out:
1869 spin_unlock(&kvm->arch.dirty_log_lock);
1870 return r;
1871}
1872
1873int kvm_arch_hardware_setup(void)
1874{
1875 return 0;
1876}
1877
1878void kvm_arch_hardware_unsetup(void)
1879{
1880}
1881
1882static void vcpu_kick_intr(void *info)
1883{
1884#ifdef DEBUG
1885 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
1886	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
1887#endif
1888}
1889
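/*
 * Wake a vcpu that is sleeping on its waitqueue; if it is currently in
 * guest mode on another physical cpu, send it an (empty) IPI so that it
 * exits the guest and notices the pending work.
 */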
1890void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1891{
1892 int ipi_pcpu = vcpu->cpu;
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001893 int cpu = get_cpu();
Xiantao Zhangb024b792008-04-01 15:29:29 +08001894
1895 if (waitqueue_active(&vcpu->wq))
1896 wake_up_interruptible(&vcpu->wq);
1897
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001898 if (vcpu->guest_mode && cpu != ipi_pcpu)
Takashi Iwai2f73cca2008-07-17 18:09:12 +02001899 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001900 put_cpu();
Xiantao Zhangb024b792008-04-01 15:29:29 +08001901}
1902
Gleb Natapov58c2dde2009-03-05 16:35:04 +02001903int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001904{
Gleb Natapov58c2dde2009-03-05 16:35:04 +02001905 return __apic_accept_irq(vcpu, irq->vector);
Xiantao Zhangb024b792008-04-01 15:29:29 +08001906}
1907
1908int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
1909{
1910 return apic->vcpu->vcpu_id == dest;
1911}
1912
1913int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
1914{
1915 return 0;
1916}
1917
Gleb Natapove1035712009-03-05 16:34:59 +02001918int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
Xiantao Zhangb024b792008-04-01 15:29:29 +08001919{
Gleb Natapove1035712009-03-05 16:34:59 +02001920 return vcpu1->arch.xtp - vcpu2->arch.xtp;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001921}
1922
Gleb Natapov343f94f2009-03-05 16:34:54 +02001923int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
1924 int short_hand, int dest, int dest_mode)
1925{
Gleb Natapov58c2dde2009-03-05 16:35:04 +02001926 struct kvm_lapic *target = vcpu->arch.apic;
Gleb Natapov343f94f2009-03-05 16:34:54 +02001927 return (dest_mode == 0) ?
1928 kvm_apic_match_physical_addr(target, dest) :
1929 kvm_apic_match_logical_addr(target, dest);
1930}
1931
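/* Scan a 256-bit IRR image and return the highest set vector, or -1. */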
Xiantao Zhangb024b792008-04-01 15:29:29 +08001932static int find_highest_bits(int *dat)
1933{
1934 u32 bits, bitnum;
1935 int i;
1936
1937 /* loop for all 256 bits */
1938 for (i = 7; i >= 0 ; i--) {
1939 bits = dat[i];
1940 if (bits) {
1941 bitnum = fls(bits);
1942 return i * 32 + bitnum - 1;
1943 }
1944 }
1945
1946 return -1;
1947}
1948
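/*
 * Highest pending interrupt vector in the VPD IRR; NMI and ExtINT take
 * priority, -1 means nothing is pending.
 */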
1949int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
1950{
1951 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1952
1953 if (vpd->irr[0] & (1UL << NMI_VECTOR))
1954 return NMI_VECTOR;
1955 if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
1956 return ExtINT_VECTOR;
1957
1958 return find_highest_bits((int *)&vpd->irr[0]);
1959}
1960
1961int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
1962{
1963 if (kvm_highest_pending_irq(vcpu) != -1)
1964 return 1;
1965 return 0;
1966}
1967
Gleb Natapov78646122009-03-23 12:12:11 +02001968int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
1969{
1970 /* do real check here */
1971 return 1;
1972}
1973
Marcelo Tosatti3d808402008-04-11 14:53:26 -03001974int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1975{
Xiantao Zhangdecc9012008-10-16 15:58:15 +08001976 return vcpu->arch.timer_fired;
Marcelo Tosatti3d808402008-04-11 14:53:26 -03001977}
1978
Xiantao Zhangb024b792008-04-01 15:29:29 +08001979gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1980{
1981 return gfn;
1982}
1983
1984int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1985{
Avi Kivitya4535292008-04-13 17:54:35 +03001986 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
Xiantao Zhangb024b792008-04-01 15:29:29 +08001987}
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001988
1989int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1990 struct kvm_mp_state *mp_state)
1991{
Xiantao Zhang8c4b5372008-08-28 09:34:08 +08001992 vcpu_load(vcpu);
1993 mp_state->mp_state = vcpu->arch.mp_state;
1994 vcpu_put(vcpu);
1995 return 0;
1996}
1997
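/*
 * Reset a vcpu to its initial state: with interrupts disabled, map the
 * VMM area, clear the launched flag, re-run the arch vcpu init, then
 * purge the temporary VMM mapping again.
 */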
1998static int vcpu_reset(struct kvm_vcpu *vcpu)
1999{
2000 int r;
2001 long psr;
2002 local_irq_save(psr);
2003 r = kvm_insert_vmm_mapping(vcpu);
2004 if (r)
2005 goto fail;
2006
2007 vcpu->arch.launched = 0;
2008 kvm_arch_vcpu_uninit(vcpu);
2009 r = kvm_arch_vcpu_init(vcpu);
2010 if (r)
2011 goto fail;
2012
2013 kvm_purge_vmm_mapping(vcpu);
2014 r = 0;
2015fail:
2016 local_irq_restore(psr);
2017 return r;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002018}
2019
2020int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2021 struct kvm_mp_state *mp_state)
2022{
Xiantao Zhang8c4b5372008-08-28 09:34:08 +08002023 int r = 0;
2024
2025 vcpu_load(vcpu);
2026 vcpu->arch.mp_state = mp_state->mp_state;
2027 if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
2028 r = vcpu_reset(vcpu);
2029 vcpu_put(vcpu);
2030 return r;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002031}