/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/lguest_bus.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/lguest.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include "../lg.h"

static int cpu_had_pge;

static struct {
        unsigned long offset;
        unsigned short segment;
} lguest_entry;
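
/* (A note on the layout: offset first, then segment, is exactly the in-memory
 * form of a 32-bit far pointer, which is what the "lcall *lguest_entry" in
 * run_guest_once() below expects to find when it jumps into the Switcher.) */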

/* Offset from where switcher.S was compiled to where we've copied it */
static unsigned long switcher_offset(void)
{
        return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}

/* This cpu's struct lguest_pages. */
static struct lguest_pages *lguest_pages(unsigned int cpu)
{
        return &(((struct lguest_pages *)
                (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
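
/* A rough sketch of the high-mapped region these two helpers assume (the real
 * constants come from the lguest headers):
 *
 *    SWITCHER_ADDR ->  [ Switcher text/data: SHARED_SWITCHER_PAGES pages ]
 *                      [ struct lguest_pages for CPU 0                   ]
 *                      [ struct lguest_pages for CPU 1                   ]
 *                      [ ... one pair of pages per possible CPU ...      ]
 */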

static DEFINE_PER_CPU(struct lguest *, last_guest);

/*S:010
 * We are getting close to the Switcher.
 *
 * Remember that each CPU has two pages which are visible to the Guest when it
 * runs on that CPU.  These pages have to contain the state for that Guest: we
 * copy the state in just before we run the Guest.
 *
 * Each Guest has "changed" flags which indicate what has changed in the Guest
 * since it last ran.  We saw this set in interrupts_and_traps.c and
 * segments.c.
 */
static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
{
        /* Copying all this data can be quite expensive.  We usually run the
         * same Guest we ran last time (and that Guest hasn't run anywhere else
         * meanwhile).  If that's not the case, we pretend everything in the
         * Guest has changed. */
        if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
                __get_cpu_var(last_guest) = lg;
                lg->last_pages = pages;
                lg->changed = CHANGED_ALL;
        }

        /* These copies are pretty cheap, so we do them unconditionally: */
        /* Save the current Host top-level page directory. */
        pages->state.host_cr3 = __pa(current->mm->pgd);
        /* Set up the Guest's page tables to see this CPU's pages (and no
         * other CPU's pages). */
        map_switcher_in_guest(lg, pages);
        /* Set up the two "TSS" members which tell the CPU what stack to use
         * for traps which go directly into the Guest (ie. traps at privilege
         * level 1). */
        pages->state.guest_tss.esp1 = lg->esp1;
        pages->state.guest_tss.ss1 = lg->ss1;

        /* Copy direct-to-Guest trap entries. */
        if (lg->changed & CHANGED_IDT)
                copy_traps(lg, pages->state.guest_idt, default_idt_entries);

        /* Copy all GDT entries which the Guest can change. */
        if (lg->changed & CHANGED_GDT)
                copy_gdt(lg, pages->state.guest_gdt);
        /* If only the TLS entries have changed, copy them. */
        else if (lg->changed & CHANGED_GDT_TLS)
                copy_gdt_tls(lg, pages->state.guest_gdt);

        /* Mark the Guest as unchanged for next time. */
        lg->changed = 0;
}

/* Finally: the code to actually call into the Switcher to run the Guest. */
static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
{
        /* This is a dummy value we need for GCC's sake. */
        unsigned int clobber;

        /* Copy the guest-specific information into this CPU's "struct
         * lguest_pages". */
        copy_in_guest_info(lg, pages);

        /* Set the trap number to 256 (impossible value).  If we fault while
         * switching to the Guest (bad segment registers or bug), this will
         * cause us to abort the Guest. */
        lg->regs->trapnum = 256;

        /* Now: we push the "eflags" register on the stack, then do an "lcall".
         * This is how we change from using the kernel code segment to using
         * the dedicated lguest code segment, as well as jumping into the
         * Switcher.
         *
         * The lcall also pushes the old code segment (KERNEL_CS) onto the
         * stack, then the address of this call.  This stack layout happens to
         * exactly match the stack of an interrupt... */
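        /* Concretely, after the "pushf; lcall" the new frame holds, from
         * higher to lower addresses: the saved eflags, then KERNEL_CS, then
         * the return eip -- the same three words a same-privilege interrupt
         * would push, which is what lets the Switcher unwind it the same way
         * an interrupt handler's return does. */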
        asm volatile("pushf; lcall *lguest_entry"
                     /* This is how we tell GCC that %eax ("a") and %ebx ("b")
                      * are changed by this routine.  The "=" means output. */
                     : "=a"(clobber), "=b"(clobber)
                     /* %eax contains the pages pointer.  ("0" refers to the
                      * 0-th argument above, ie "a").  %ebx contains the
                      * physical address of the Guest's top-level page
                      * directory. */
                     : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
                     /* We tell gcc that all these registers could change,
                      * which means we don't have to save and restore them in
                      * the Switcher. */
                     : "memory", "%edx", "%ecx", "%edi", "%esi");
}
/*:*/

/*H:040 This is the i386-specific code to set up and run the Guest.  Interrupts
 * are disabled: we own the CPU. */
void lguest_arch_run_guest(struct lguest *lg)
{
        /* Remember the awfully-named TS bit?  If the Guest has asked
         * to set it we set it now, so we can trap and pass that trap
         * to the Guest if it uses the FPU. */
        if (lg->ts)
                lguest_set_ts();

        /* SYSENTER is an optimized way of doing system calls.  We
         * can't allow it because it always jumps to privilege level 0.
         * A normal Guest won't try it because we don't advertise it in
         * CPUID, but a malicious Guest (or malicious Guest userspace
         * program) could, so we tell the CPU to disable it before
         * running the Guest. */
        if (boot_cpu_has(X86_FEATURE_SEP))
                wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

        /* Now we actually run the Guest.  It will pop back out when
         * something interesting happens, and we can examine its
         * registers to see what it was doing. */
        run_guest_once(lg, lguest_pages(raw_smp_processor_id()));

        /* The "regs" pointer contains two extra entries which are not
         * really registers: a trap number which says what interrupt or
         * trap made the switcher code come back, and an error code
         * which some traps set. */

        /* If the Guest page faulted, then the cr2 register will tell
         * us the bad virtual address.  We have to grab this now,
         * because once we re-enable interrupts an interrupt could
         * fault and thus overwrite cr2, or we could even move off to a
         * different CPU. */
        if (lg->regs->trapnum == 14)
                lg->arch.last_pagefault = read_cr2();
        /* Similarly, if we took a trap because the Guest used the FPU,
         * we have to restore the FPU it expects to see. */
        else if (lg->regs->trapnum == 7)
                math_state_restore();

        /* Restore SYSENTER if it's supposed to be on. */
        if (boot_cpu_has(X86_FEATURE_SEP))
                wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
}
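
/* (Trap 14 is the page fault and trap 7 the "device not available" FPU trap;
 * what to do about them is decided in lguest_arch_handle_trap() below, once
 * interrupts are back on.) */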

/*H:130 Our Guest is usually so well behaved; it never tries to do things it
 * isn't allowed to.  Unfortunately, Linux's paravirtual infrastructure isn't
 * quite complete, because it doesn't contain replacements for the Intel I/O
 * instructions.  As a result, the Guest sometimes fumbles across one during
 * the boot process as it probes for various things which are usually attached
 * to a PC.
 *
 * When the Guest uses one of these instructions, we get trap #13 (General
 * Protection Fault) and come here.  We see if it's one of those troublesome
 * instructions and skip over it.  We return true if we did. */
static int emulate_insn(struct lguest *lg)
{
        u8 insn;
        unsigned int insnlen = 0, in = 0, shift = 0;
        /* The eip contains the *virtual* address of the Guest's instruction:
         * guest_pa just subtracts the Guest's page_offset. */
        unsigned long physaddr = guest_pa(lg, lg->regs->eip);

        /* The guest_pa() function only works for Guest kernel addresses, but
         * that's all we're trying to do anyway. */
        if (lg->regs->eip < lg->page_offset)
                return 0;

        /* Decoding x86 instructions is icky. */
        lgread(lg, &insn, physaddr, 1);

        /* 0x66 is an "operand prefix".  It means it's using the upper 16 bits
         * of the eax register. */
        if (insn == 0x66) {
                shift = 16;
                /* The instruction is 1 byte so far, read the next byte. */
                insnlen = 1;
                lgread(lg, &insn, physaddr + insnlen, 1);
        }

        /* We can ignore the lower bit for the moment and decode the 4 opcodes
         * we need to emulate. */
        switch (insn & 0xFE) {
        case 0xE4: /* in     <next byte>,%al */
                insnlen += 2;
                in = 1;
                break;
        case 0xEC: /* in     (%dx),%al */
                insnlen += 1;
                in = 1;
                break;
        case 0xE6: /* out    %al,<next byte> */
                insnlen += 2;
                break;
        case 0xEE: /* out    %al,(%dx) */
                insnlen += 1;
                break;
        default:
                /* OK, we don't know what this is, can't emulate. */
                return 0;
        }

        /* If it was an "IN" instruction, they expect the result to be read
         * into %eax, so we change %eax.  We always return all-ones, which
         * traditionally means "there's nothing there". */
        if (in) {
                /* Lower bit tells us whether it's a 16 or 32 bit access */
                if (insn & 0x1)
                        lg->regs->eax = 0xFFFFFFFF;
                else
                        lg->regs->eax |= (0xFFFF << shift);
        }
        /* Finally, we've "done" the instruction, so move past it. */
        lg->regs->eip += insnlen;
        /* Success! */
        return 1;
}
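
/* A worked example (just tracing the code above; the port number is
 * arbitrary): "in $0x64,%al" is the byte 0xE4 followed by the port.  We hit
 * the 0xE4 case, so insnlen becomes 2 and "in" is set; bit 0 of the opcode is
 * clear, so we OR 0xFFFF into %eax and the Guest reads back 0xFF in %al.
 * Finally eip skips forward two bytes, as if the port had simply answered
 * with all-ones. */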

/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lguest *lg)
{
        switch (lg->regs->trapnum) {
        case 13: /* We've intercepted a GPF. */
                /* Check if this was one of those annoying IN or OUT
                 * instructions which we need to emulate.  If so, we
                 * just go back into the Guest after we've done it. */
                if (lg->regs->errcode == 0) {
                        if (emulate_insn(lg))
                                return;
                }
                break;
        case 14: /* We've intercepted a page fault. */
                /* The Guest accessed a virtual address that wasn't
                 * mapped.  This happens a lot: we don't actually set
                 * up most of the page tables for the Guest at all when
                 * we start: as it runs it asks for more and more, and
                 * we set them up as required.  In this case, we don't
                 * even tell the Guest that the fault happened.
                 *
                 * The errcode tells whether this was a read or a
                 * write, and whether kernel or userspace code. */
                if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode))
                        return;

                /* OK, it's really not there (or not OK): the Guest
                 * needs to know.  We write out the cr2 value so it
                 * knows where the fault occurred.
                 *
                 * Note that if the Guest were really messed up, this
                 * could happen before it's done the INITIALIZE
                 * hypercall, so lg->lguest_data will be NULL */
                if (lg->lguest_data &&
                    put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2))
                        kill_guest(lg, "Writing cr2");
                break;
        case 7: /* We've intercepted a Device Not Available fault. */
                /* If the Guest doesn't want to know, we already
                 * restored the Floating Point Unit, so we just
                 * continue without telling it. */
                if (!lg->ts)
                        return;
                break;
        case 32 ... 255:
                /* These values mean a real interrupt occurred, in
                 * which case the Host handler has already been run.
                 * We just do a friendly check if another process
                 * should now be run, then fall through to loop
                 * around: */
                cond_resched();
        case LGUEST_TRAP_ENTRY: /* Handled before re-entering Guest */
                return;
        }

        /* We didn't handle the trap, so it needs to go to the Guest. */
        if (!deliver_trap(lg, lg->regs->trapnum))
                /* If the Guest doesn't have a handler (either it hasn't
                 * registered any yet, or it's one of the faults we don't let
                 * it handle), it dies with a cryptic error message. */
                kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
                           lg->regs->trapnum, lg->regs->eip,
                           lg->regs->trapnum == 14 ? lg->arch.last_pagefault
                           : lg->regs->errcode);
}

/* Now we can look at each of the routines this calls, in increasing order of
 * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
 * deliver_trap() and demand_page().  After all those, we'll be ready to
 * examine the Switcher, and our philosophical understanding of the Host/Guest
 * duality will be complete. :*/
static void adjust_pge(void *on)
{
        if (on)
                write_cr4(read_cr4() | X86_CR4_PGE);
        else
                write_cr4(read_cr4() & ~X86_CR4_PGE);
}
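
/* (A side note: a write to cr4 which changes the PGE bit also flushes the
 * TLB, including any entries marked global, so clearing it below conveniently
 * throws away whatever global translations the CPU has already cached.) */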

/*H:020 Now the Switcher is mapped and everything else is ready, we need to do
 * some more i386-specific initialization. */
void __init lguest_arch_host_init(void)
{
        int i;

        /* Most of the i386/switcher.S doesn't care that it's been moved; on
         * Intel, jumps are relative, and it doesn't access any references to
         * external code or data.
         *
         * The only exception is the interrupt handlers in switcher.S: their
         * addresses are placed in a table (default_idt_entries), so we need to
         * update the table with the new addresses.  switcher_offset() is a
         * convenience function which returns the distance between the builtin
         * switcher code and the high-mapped copy we just made. */
        for (i = 0; i < IDT_ENTRIES; i++)
                default_idt_entries[i] += switcher_offset();

        /*
         * Set up the Switcher's per-cpu areas.
         *
         * Each CPU gets two pages of its own within the high-mapped region
         * (aka. "struct lguest_pages").  Much of this can be initialized now,
         * but some depends on what Guest we are running (which is set up in
         * copy_in_guest_info()).
         */
        for_each_possible_cpu(i) {
                /* lguest_pages() returns this CPU's two pages. */
                struct lguest_pages *pages = lguest_pages(i);
                /* This is a convenience pointer to make the code fit one
                 * statement to a line. */
                struct lguest_ro_state *state = &pages->state;

                /* The Global Descriptor Table: the Host has a different one
                 * for each CPU.  We keep a descriptor for the GDT which says
                 * where it is and how big it is (the size is actually the last
                 * byte, not the size, hence the "-1"). */
                state->host_gdt_desc.size = GDT_SIZE-1;
                state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

                /* All CPUs on the Host use the same Interrupt Descriptor
                 * Table, so we just use store_idt(), which gets this CPU's IDT
                 * descriptor. */
                store_idt(&state->host_idt_desc);

                /* The descriptors for the Guest's GDT and IDT can be filled
                 * out now, too.  We copy the GDT & IDT into ->guest_gdt and
                 * ->guest_idt before actually running the Guest. */
                state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
                state->guest_idt_desc.address = (long)&state->guest_idt;
                state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
                state->guest_gdt_desc.address = (long)&state->guest_gdt;

                /* We know where we want the stack to be when the Guest enters
                 * the switcher: in pages->regs.  The stack grows downwards,
                 * so we start it at the end of that structure. */
                state->guest_tss.esp0 = (long)(&pages->regs + 1);
                /* And this is the GDT entry to use for the stack: we keep a
                 * couple of special LGUEST entries. */
                state->guest_tss.ss0 = LGUEST_DS;

                /* x86 can have a fine-grained bitmap which indicates what I/O
                 * ports the process can use.  We set it to the end of our
                 * structure, meaning "none". */
                state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

                /* Some GDT entries are the same across all Guests, so we can
                 * set them up now. */
                setup_default_gdt_entries(state);
                /* Most IDT entries are the same for all Guests, too. */
                setup_default_idt_entries(state, default_idt_entries);

                /* The Host needs to be able to use the LGUEST segments on this
                 * CPU, too, so put them in the Host GDT. */
                get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
                get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
        }

        /* In the Switcher, we want the %cs segment register to use the
         * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
         * it will be undisturbed when we switch.  To change %cs and jump we
         * need this structure to feed to Intel's "lcall" instruction. */
        lguest_entry.offset = (long)switch_to_guest + switcher_offset();
        lguest_entry.segment = LGUEST_CS;

        /* Finally, we need to turn off "Page Global Enable".  PGE is an
         * optimization where page table entries are specially marked to show
         * they never change.  The Host kernel marks all the kernel pages this
         * way because it's always present, even when userspace is running.
         *
         * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
         * switch to the Guest kernel.  If you don't disable this on all CPUs,
         * you'll get really weird bugs that you'll chase for two days.
         *
         * I used to turn PGE off every time we switched to the Guest and back
         * on when we return, but that slowed the Switcher down noticeably. */

        /* We don't need the complexity of CPUs coming and going while we're
         * doing this. */
        lock_cpu_hotplug();
        if (cpu_has_pge) { /* We have a broader idea of "global". */
                /* Remember that this was originally set (for cleanup). */
                cpu_had_pge = 1;
                /* adjust_pge is a helper function which sets or unsets the PGE
                 * bit on its CPU, depending on the argument (0 == unset). */
                on_each_cpu(adjust_pge, (void *)0, 0, 1);
                /* Turn off the feature in the global feature set. */
                clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
        }
        unlock_cpu_hotplug();
}
/*:*/

void __exit lguest_arch_host_fini(void)
{
        /* If we had PGE before we started, turn it back on now. */
        lock_cpu_hotplug();
        if (cpu_had_pge) {
                set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
                /* adjust_pge's argument "1" means set PGE. */
                on_each_cpu(adjust_pge, (void *)1, 0, 1);
        }
        unlock_cpu_hotplug();
}