/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

#define PTE_SIZE 12

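/*
 * Drop the host hash PTE that shadows a cached guest mapping so the
 * hardware can no longer translate through it.
 */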
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
			       false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

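/*
 * Look up the host VSID that shadows a given guest VSID.  A guest VSID may
 * live in either of two sid_map slots (forward or backward hash); return
 * NULL if neither slot holds a valid match.
 */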
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

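/*
 * Map a guest page into the host hash page table: translate the guest real
 * address to a host pfn, choose protection bits, insert the hash PTE
 * (retrying in the secondary group if necessary) and remember the mapping
 * in the shadow HPTE cache.
 */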
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Get host physical address for gpa */
	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				 hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}

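/*
 * Flush the shadow mappings for a guest PTE.  For 64K segments the low
 * 4 bits of the virtual page number are ignored by widening the mask.
 */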
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

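/*
 * Allocate a new guest->host VSID mapping.  Colliding entries alternate
 * between the forward and backward hash slot; when the proto-VSID space is
 * exhausted, all shadow state is flushed and allocation starts over.
 */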
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

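/*
 * Pick a shadow SLB slot for the given ESID: reuse a matching or previously
 * invalidated entry if there is one, otherwise take the next free slot,
 * flushing all segments first if the shadow SLB is full.
 */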
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!svcpu->slb_max)
		svcpu->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

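/*
 * Set up a shadow SLB entry for the guest effective address: resolve the
 * guest VSID, find or create its host VSID mapping and program the slot.
 */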
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

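/* Invalidate the shadow SLB entry that covers the given effective address. */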
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 1; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

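/* Invalidate all shadow SLB entries except the reserved slot 0. */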
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 1;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

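/* Tear down a vcpu's shadow MMU state: the HPTE cache and its MMU context. */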
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

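/*
 * Set up a vcpu's shadow MMU: allocate a host MMU context, derive the range
 * of proto-VSIDs this vcpu may hand out and initialize the HPTE cache.
 */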
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}