[PATCH] KVM: MMU: Handle misaligned accesses to write protected guest page tables

A misaligned access affects two shadow ptes instead of just one.

Since a misaligned access is unlikely to occur on a real page table, just zap
the page out of existence, avoiding further trouble.
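
For illustration only (not part of the patch), a minimal user-space sketch of
the alignment check added in the hunk below; the variable names mirror the
patch's locals, and the sample offset/bytes values are hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned pte_size = 8;			/* 64-bit guest pte */
		unsigned offset = 4, bytes = 8;		/* write starts mid-pte */
		unsigned misaligned;

		/*
		 * Nonzero iff the first and last byte written fall into
		 * different pte-sized slots within the page, i.e. the
		 * access straddles a pte boundary or is itself unaligned.
		 */
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		printf("misaligned = %#x\n", misaligned);	/* 0x8 here */
		return 0;
	}

Only a write that stays within one naturally aligned pte yields zero; anything
else causes the shadow page to be zapped rather than patched up.
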
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 53c3643..50b1432 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -954,21 +954,36 @@
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm_mmu_page *page;
 	struct kvm_mmu_page *child;
-	struct hlist_node *node;
+	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
 	u64 *spte;
 	u64 pte;
 	unsigned offset = offset_in_page(gpa);
+	unsigned pte_size;
 	unsigned page_offset;
+	unsigned misaligned;
 	int level;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
-	hlist_for_each_entry(page, node, bucket, hash_link) {
+	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
 		if (page->gfn != gfn || page->role.metaphysical)
 			continue;
+		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+		if (misaligned) {
+			/*
+			 * Misaligned accesses are too much trouble to fix
+			 * up; also, they usually indicate a page is not used
+			 * as a page table.
+			 */
+			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+				 gpa, bytes, page->role.word);
+			kvm_mmu_zap_page(vcpu, page);
+			continue;
+		}
 		page_offset = offset;
 		level = page->role.level;
 		if (page->role.glevels == PT32_ROOT_LEVEL) {