/*P:900 This is the Switcher: code which sits at 0xFFC00000 to do the low-level
 * Guest<->Host switch. It is as simple as it can be made, but it's naturally
 * very specific to x86.
 *
 * You have now completed Preparation. If this has whetted your appetite; if
 * you are feeling invigorated and refreshed then the next, more challenging
 * stage can be found in "make Guest". :*/

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include "lg.h"

.text
ENTRY(start_switcher_text)

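/* A rough sketch of the per-CPU "struct lguest_pages" that %eax points at in
 * switch_to_guest below.  The authoritative definition is in lg.h; the shape
 * shown here is inferred from the asm-offsets constants and comments in this
 * file, so take it as an illustration, not the real layout:
 *
 *	struct lguest_pages {
 *		// Page 1: used as the Guest's stack; the register frame sits
 *		// at the top, which is why %esp is pointed at
 *		// LGUEST_PAGES_regs below.
 *		char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
 *		struct lguest_regs regs;	// LGUEST_PAGES_regs
 *
 *		// Page 2: host state plus Guest descriptor tables, read-only
 *		// to the Guest once its page tables are loaded ("->state"
 *		// in the comment below): host_cr3, host_sp, the
 *		// host/guest gdt_desc and idt_desc pairs, guest_gdt, ...
 *		struct lguest_ro_state state;
 *	};
 */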
/* %eax points to lguest pages for this CPU. %ebx contains cr3 value.
   All normal registers can be clobbered! */
ENTRY(switch_to_guest)
	/* Save host segments on host stack. */
	pushl	%es
	pushl	%ds
	pushl	%gs
	pushl	%fs
	/* With CONFIG_FRAME_POINTER, gcc doesn't let us clobber this! */
	pushl	%ebp
	/* Save host stack. */
	movl	%esp, LGUEST_PAGES_host_sp(%eax)
	/* Switch to guest stack: if we get NMI we expect to be there. */
	movl	%eax, %edx
	addl	$LGUEST_PAGES_regs, %edx
	movl	%edx, %esp
	/* Switch to guest's GDT, IDT. */
	lgdt	LGUEST_PAGES_guest_gdt_desc(%eax)
	lidt	LGUEST_PAGES_guest_idt_desc(%eax)
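	/* A note on the *_desc fields used with lgdt/lidt here and below:
	 * each is the usual x86 6-byte pseudo-descriptor, a 16-bit limit
	 * followed by a 32-bit linear base address.  That is why
	 * deliver_to_host can fetch the IDT base with an offset of +2 into
	 * host_idt_desc. */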
	/* Switch to guest's TSS while GDT still writable. */
	movl	$(GDT_ENTRY_TSS*8), %edx
	ltr	%dx
	/* Set host's TSS GDT entry to available (clear byte 5 bit 2). */
	movl	(LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
	andb	$0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
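	/* Why clear that bit?  ltr refuses to load a TSS descriptor that is
	 * already marked busy (type 0xB instead of 0x9 in byte 5), and ltr
	 * itself sets the busy bit when it loads one.  We just made the
	 * guest's TSS busy above, and SWITCH_TO_HOST will "ltr" the host's
	 * TSS again on the way back, so the host's entry must be put back to
	 * "available" here (the guest's entry is likewise cleared in
	 * SWITCH_TO_HOST). */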
	/* Switch to guest page tables: lguest_pages->state now read-only. */
	movl	%ebx, %cr3
	/* Restore guest regs */
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%gs
	popl	%eax
	popl	%fs
	popl	%ds
	popl	%es
	/* Skip error code and trap number */
	addl	$8, %esp
	iret

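/* For reference, a sketch of the register frame those pops (and the pushes in
 * SWITCH_TO_HOST below) walk through.  The real struct lguest_regs is defined
 * in lg.h; the field order here is simply read off the pop/push sequence in
 * this file, so treat it as illustrative:
 *
 *	struct lguest_regs {
 *		// Saved and restored manually by the Switcher, lowest
 *		// address (top of stack) first:
 *		unsigned long ebx, ecx, edx;
 *		unsigned long esi, edi, ebp;
 *		unsigned long gs;
 *		unsigned long eax;
 *		unsigned long fs, ds, es;
 *		unsigned long trapnum, errcode;	// skipped by "addl $8" above
 *		// Consumed by the final iret (and pushed by the CPU on a trap):
 *		unsigned long eip, cs, eflags, esp, ss;
 *	};
 */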
#define SWITCH_TO_HOST							\
	/* Save guest state */						\
	pushl	%es;							\
	pushl	%ds;							\
	pushl	%fs;							\
	pushl	%eax;							\
	pushl	%gs;							\
	pushl	%ebp;							\
	pushl	%edi;							\
	pushl	%esi;							\
	pushl	%edx;							\
	pushl	%ecx;							\
	pushl	%ebx;							\
	/* Load lguest ds segment for convenience. */			\
	movl	$(LGUEST_DS), %eax;					\
	movl	%eax, %ds;						\
	/* Figure out where we are, based on stack (at top of regs). */	\
	movl	%esp, %eax;						\
	subl	$LGUEST_PAGES_regs, %eax;				\
	/* Put trap number in %ebx before we switch cr3 and lose it. */	\
	movl	LGUEST_PAGES_regs_trapnum(%eax), %ebx;			\
	/* Switch to host page tables (host GDT, IDT and stack are in	\
	   host mem, so need this first) */				\
	movl	LGUEST_PAGES_host_cr3(%eax), %edx;			\
	movl	%edx, %cr3;						\
	/* Set guest's TSS to available (clear byte 5 bit 2). */	\
	andb	$0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
	/* Switch to host's GDT & IDT. */				\
	lgdt	LGUEST_PAGES_host_gdt_desc(%eax);			\
	lidt	LGUEST_PAGES_host_idt_desc(%eax);			\
	/* Switch to host's stack. */					\
	movl	LGUEST_PAGES_host_sp(%eax), %esp;			\
	/* Switch to host's TSS */					\
	movl	$(GDT_ENTRY_TSS*8), %edx;				\
	ltr	%dx;							\
	popl	%ebp;							\
	popl	%fs;							\
	popl	%gs;							\
	popl	%ds;							\
	popl	%es
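/* On exit from SWITCH_TO_HOST we are back on the host stack with the host's
 * segments, GDT, IDT, TSS and page tables, and two values survive in
 * registers: %eax still points at this CPU's lguest_pages (the Switcher pages
 * are mapped at the same address in both worlds) and %ebx holds the guest's
 * trap number.  return_to_host ignores them; deliver_to_host uses both to
 * locate the host's handler for that trap. */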

/* Return to run_guest_once. */
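/* How does a plain iret get us back there?  The host entered switch_to_guest
 * via a far call with the flags pushed first (run_guest_once on the host side
 * does roughly "pushf; lcall" into the Switcher), so the host stack we have
 * just restored has eip, cs and eflags on top: exactly the frame iret
 * consumes, landing us on the instruction after that call. */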
return_to_host:
	SWITCH_TO_HOST
	iret

deliver_to_host:
	SWITCH_TO_HOST
	/* Decode IDT and jump to the host's irq handler. When that does iret,
	 * it will return to run_guest_once. This is a feature. */
	movl	(LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
	leal	(%edx,%ebx,8), %eax
	movzwl	(%eax),%edx
	movl	4(%eax), %eax
	xorw	%ax, %ax
	orl	%eax, %edx
	jmp	*%edx
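/* What those six instructions do, in C.  An x86 interrupt gate splits the
 * handler address across the two ends of an 8-byte descriptor; the sketch
 * below is the plain architectural layout (not a structure from this
 * project), showing how the IDT base from host_idt_desc and the trap number
 * in %ebx yield the handler we jmp to:
 *
 *	struct idt_gate {
 *		unsigned short offset_low;	// handler bits 0..15
 *		unsigned short selector;	// code segment for the handler
 *		unsigned char  zero;
 *		unsigned char  type_attr;	// gate type, DPL, present bit
 *		unsigned short offset_high;	// handler bits 16..31
 *	};
 *
 *	struct idt_gate *gate = (struct idt_gate *)idt_base + trapnum;
 *	handler = ((unsigned long)gate->offset_high << 16) | gate->offset_low;
 *
 * The movzwl picks up offset_low, the movl/xorw pair isolates offset_high in
 * the top half of %eax, and the orl glues the two halves together. */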

/* Real hardware interrupts are delivered straight to the host. Others
   cause us to return to run_guest_once so it can decide what to do. Note
   that some of these are overridden by the guest to deliver directly, and
   never enter here (see load_guest_idt_entry). */
.macro IRQ_STUB N TARGET
	.data; .long 1f; .text; 1:
	/* Make an error number for most traps, which don't have one. */
	.if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
	pushl	$0
	.endif
	pushl	$\N
	jmp	\TARGET
	ALIGN
.endm
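/* Two things worth spelling out about that stub.  First, the ".data; .long 1f;
 * .text" dance emits the stub's own address into the current .data location
 * (the default_idt_entries table below) while the stub body itself stays in
 * .text, so the table and the code are built in one pass.  Second, the .if
 * skips the fake "pushl $0" exactly for the vectors where the CPU already
 * pushes a real error code (8 = #DF, 10 = #TS, 11 = #NP, 12 = #SS, 13 = #GP,
 * 14 = #PF, 17 = #AC), keeping the stack frame layout identical for every
 * trap. */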

.macro IRQ_STUBS FIRST LAST TARGET
	irq=\FIRST
	.rept \LAST-\FIRST+1
		IRQ_STUB irq \TARGET
		irq=irq+1
	.endr
.endm

/* We intercept every interrupt, because we may need to switch back to
 * host. Unfortunately we can't tell them apart except by entry
 * point, so we need 256 entry points.
 */
.data
.global default_idt_entries
default_idt_entries:
.text
	IRQ_STUBS 0 1 return_to_host		/* First two traps */
	IRQ_STUB 2 handle_nmi			/* NMI */
	IRQ_STUBS 3 31 return_to_host		/* Rest of traps */
	IRQ_STUBS 32 127 deliver_to_host	/* Real interrupts */
	IRQ_STUB 128 return_to_host		/* System call (overridden) */
	IRQ_STUBS 129 255 deliver_to_host	/* Other real interrupts */

/* We ignore NMI and return. */
handle_nmi:
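	/* The stub pushed a fake error code and the trap number (2); discard
	 * both before returning to whatever was interrupted. */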
	addl	$8, %esp
	iret

ENTRY(end_switcher_text)