/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

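/*
 * Drop one shadow PTE from the host hash table: clear the valid bit in
 * word 0 of its PTEG slot, flush the stale translation with the
 * sync/tlbie/sync/tlbsync sequence the 32-bit hashed MMU requires, and
 * release our reference on the backing host page.
 */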
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* host_va is an ulong, so print it with %lx */
	dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%lx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	pteg = (u32 *)pte->slot;

	pteg[0] = 0;
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");

	pte->host_va = 0;

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);
}

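/*
 * Invalidate every cached shadow PTE whose guest effective address
 * matches guest_ea under ea_mask. A zero mask matches everything and
 * also resets the cache allocation cursor.
 */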
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(vcpu, pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}

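/*
 * Same as kvmppc_mmu_pte_flush(), but matching on the guest virtual
 * page number instead of the effective address.
 */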
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);
	}
}

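/*
 * Invalidate every cached shadow PTE whose guest real (physical)
 * address falls into [pa_start, pa_end).
 */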
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(vcpu, pte);
	}
}

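/*
 * Look up a cached shadow PTE by guest effective address. Returns the
 * guest PTE on a hit, or NULL if no valid entry maps that address.
 */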
struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
{
	int i;
	u64 guest_vp;

	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if (pte->pte.vpage == guest_vp)
			return &pte->pte;
	}

	return NULL;
}

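/*
 * Hand out the next free slot in the shadow PTE cache. When the cache
 * is full, flush everything and start over from slot 0.
 */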
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

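/*
 * Look up the host VSID for a guest VSID. Both candidate slots, the
 * hash position and its mirror (SID_MAP_MASK - hash), are checked;
 * returns NULL on a miss.
 */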
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

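/*
 * Compute the host PTEG address for a mapping. The hash is the classic
 * 32-bit formula: low VSID bits XORed with the page index within the
 * segment, shifted left by 6 because each PTEG is 64 bytes; the
 * secondary hash is its one's complement. htabmask keeps the result
 * inside the hash table that SDR1 describes.
 */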
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}

extern char etext[];

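/*
 * Create a host HPTE for one guest page: resolve the guest real
 * address to a host page, translate the guest segment to a host VSID,
 * pick (and if necessary evict) a slot in one of the two candidate
 * PTEGs, write the two HPTE words, and record the mapping in the
 * shadow PTE cache so it can be invalidated later.
 */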
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	u64 va;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	int hpte_id;
	struct hpte_cache *pte;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n",
				 orig_pte->raddr >> PAGE_SHIFT);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);

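	/*
	 * Scan the eight slots of one PTEG (rr steps by two u32 words
	 * per slot). If every slot holds a valid entry, flip to the
	 * other hash function and allow eviction on the second pass.
	 */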
next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

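	/*
	 * Assemble the two HPTE words (32-bit hashed MMU format):
	 * word 0 is V | VSID | H | API, where the API is the top six
	 * bits of the page offset within the segment (EA >> 22);
	 * word 1 is the real page number plus the R, C, WIMG and PP
	 * bits. R and C are pre-set so the hardware never needs to
	 * update them in place.
	 */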
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our Shadow PTE code about the new page */

	hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
	pte = &vcpu->arch.hpte_cache[hpte_id];

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, va,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	return 0;
}

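/*
 * Allocate a new guest-to-host VSID mapping. Alternating between the
 * two hash positions spreads colliding guest VSIDs across both slots;
 * when the context's VSID range runs out, all mappings and shadow
 * state are flushed and allocation restarts at vsid_first.
 */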
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding gvsids would always hash to the same slot, so map
	   every other one to the mirrored position instead */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &vcpu_book3s->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next;

	/* Stepping by 111 would keep us completely aligned with the
	   rest of Linux, but that would leave us way too few VSIDs! */
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

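/*
 * Populate the shadow segment register for the segment containing
 * eaddr: translate the guest ESID to a guest VSID, find or create a
 * host VSID for it, and store the resulting SR value (with the Kp key
 * bit set) in the shadow vcpu.
 */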
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

	return 0;
}

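/*
 * Invalidate all shadow segment registers; the next access to each
 * segment faults and remaps it.
 */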
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;
}

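/*
 * Tear down per-vcpu MMU state: flush all shadow PTEs and give the
 * host MMU context back.
 */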
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	preempt_disable();
	__destroy_context(to_book3s(vcpu)->context_id);
	preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)

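/*
 * Per-vcpu MMU setup: reserve a host MMU context to draw host VSIDs
 * from, and cache the location and mask of the host hash table from
 * the SDR1 register.
 */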
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
	vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);

#if 0 /* XXX still doesn't guarantee uniqueness */
	/* We could collide with the Linux vsid space because the vsid
	 * wraps around at 24 bits. We're safe if we do our own space
	 * though, so let's always set the highest bit. */

	vcpu3s->vsid_max |= 0x00800000;
	vcpu3s->vsid_first |= 0x00800000;
#endif
	BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);

	vcpu3s->vsid_next = vcpu3s->vsid_first;

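	/*
	 * On the 32-bit hashed MMU, SDR1 holds the hash table origin in
	 * its upper 16 bits and HTABMASK in its low 9 bits. The mask we
	 * build below is for a PTEG byte offset: bits 6-15 are always
	 * valid (a PTEG is 64 bytes), and HTABMASK extends it upwards.
	 */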
	/* Remember where the HTAB is */
	asm ("mfsdr1 %0" : "=r"(sdr1));
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	return 0;
}