/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

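/* Index of the next shadow TLB entry to overwrite; victims are chosen in
 * simple round-robin order (see kvmppc_mmu_map() below). */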
static unsigned int kvmppc_tlb_44x_pos;

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* Mask off reserved bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

	if (!usermode) {
		/* The guest is in supervisor mode, but on the host it runs in
		 * user mode, so translate its supervisor permissions into
		 * user permissions. (SX/SW/SR sit three bits below UX/UW/UR,
		 * so shifting left by 3 copies them across.) */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
			 unsigned int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

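/* Find the guest TLB entry, if any, that translates eaddr in the current
 * instruction address space (MSR[IS]). */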
struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu->arch.guest_tlb[index];
}

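/* Find the guest TLB entry, if any, that translates eaddr in the current
 * data address space (MSR[DS]). */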
struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu->arch.guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
				      unsigned int index)
{
	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
	struct page *page = vcpu->arch.shadow_pages[index];

	if (get_tlb_v(stlbe)) {
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
}

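/* Map the guest page backing gfn at guest virtual address gvaddr into the
 * shadow TLB, choosing the victim shadow entry in round-robin order. */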
/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
		    u32 flags)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;
	unsigned int victim;

	/* Future optimization: don't overwrite the TLB entry containing the
	 * current PC (or stack?). */
	victim = kvmppc_tlb_44x_pos++;
	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
		kvmppc_tlb_44x_pos = 0;
	stlbe = &vcpu->arch.shadow_tlb[victim];

	/* Get reference to new page. */
	down_read(&current->mm->mmap_sem);
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		up_read(&current->mm->mmap_sem);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);
	up_read(&current->mm->mmap_sem);

	vcpu->arch.shadow_pages[victim] = new_page;

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

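	/* The hardware TID field is only 8 bits wide, so just the low byte of
	 * the ASID is used. */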
	stlbe->tid = asid & 0xff;

	/* Force TS=1 for all guest mappings. */
	/* For now we hardcode 4KB mappings, but it will be important to
	 * use host large pages in the future. */
	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
		       | PPC44x_TLB_4K;

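	/* word1 holds the real page number; the 440 supports 36-bit physical
	 * addresses, so bits 32-35 of hpaddr go into the ERPN field in the
	 * low four bits. */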
	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
						    vcpu->arch.msr & MSR_PR);
}

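/* Invalidate, and release the host pages behind, any shadow TLB entries that
 * overlap the guest virtual range [eaddr, eend] for the given ASID (entries
 * with TID 0 match any ASID). */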
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   gva_t eend, u32 asid)
{
	unsigned int pid = asid & 0xff;
	int i;

	/* XXX Replace loop with fancy data structures. */
	down_write(&current->mm->mmap_sem);
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
	}
	up_write(&current->mm->mmap_sem);
}

/* Invalidate all mappings, so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	down_write(&current->mm->mmap_sem);
	for (i = 0; i <= tlb_44x_hwater; i++) {
		kvmppc_44x_shadow_release(vcpu, i);
		vcpu->arch.shadow_tlb[i].word0 = 0;
	}
	up_write(&current->mm->mmap_sem);
}