/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

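/*
 * HPTEs are stored big-endian in memory. The per-entry lock is a
 * software bit (0x8) in the first doubleword of the HPTE; on a
 * little-endian kernel the doubleword is byte-swapped on load, so the
 * same physical bit sits at position 56 + 3 of the value the bitops
 * below operate on.
 */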
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

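/*
 * Issue a broadcast (global) tlbie for one page. The VA register image
 * packs the effective address bits, the segment size (bits 8-9) and
 * the page size encoding; callers provide the surrounding
 * ptesync/eieio/tlbsync ordering (see tlbie() below).
 */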
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		/* hand-encoded tlbiel instruction */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

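/*
 * Invalidate one translation, preferring the processor-local tlbiel
 * when the caller asked for a local flush and the page size supports
 * it, otherwise broadcasting a tlbie (serialized on native_tlbie_lock
 * on hardware without MMU_FTR_LOCKLESS_TLBIE).
 */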
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

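/*
 * Per-HPTE lock, taken before examining or rewriting an entry. A
 * typical (hypothetical) caller looks like:
 *
 *	native_lock_hpte(hptep);
 *	hpte_v = be64_to_cpu(hptep->v);
 *	...check HPTE_V_COMPARE/HPTE_V_VALID, update r or v...
 *	native_unlock_hpte(hptep);	/* or write hptep->v, which
 *					   clears the lock bit itself */
 *
 * Because the lock lives in the valid doubleword, storing a new v
 * (insert, invalidate) releases the lock implicitly.
 */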
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

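/*
 * Try to install an HPTE in the given group of 8 slots. Returns -1 if
 * every slot is valid (the caller then evicts via native_hpte_remove),
 * or the chosen slot with the secondary-hash flag folded into bit 3,
 * i.e. (!!(vflags & HPTE_V_SECONDARY) << 3) | slot.
 */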
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

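/*
 * Evict a non-bolted entry to make room in a full group, starting the
 * scan at a pseudo-random slot taken from the timebase. Note that no
 * tlbie is done here; the update/invalidate paths compensate by
 * always flushing the TLB (see the comments there).
 */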
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

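/*
 * Update the protection bits of the entry at @slot. Returns 0 on a
 * hit and -1 if the slot no longer holds the expected translation
 * (it may have been evicted since the caller hashed it); the TLB
 * entry is flushed in either case.
 */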
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

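/*
 * Look up a bolted translation. Only the primary group is searched,
 * since bolted mappings are never inserted via the secondary hash.
 */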
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
			~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page sizes are always the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

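/*
 * Invalidate the entry at @slot, but only if it still matches @vpn:
 * the AVPN compare under the HPTE lock guards against the entry
 * having been evicted and reused since the caller computed the slot.
 */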
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

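/*
 * Invalidate every base-page HPTE backing one hugepage PMD. The
 * hpte_slot_array records, per sub-page, whether an HPTE was inserted
 * and which hash slot it went to, so only the valid ones are torn
 * down; a single tlbie for the hugepage then suffices.
 */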
static void native_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i;
	int lock_tlbie;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
	}
	/*
	 * Since this is a hugepage, we just need a single tlbie.
	 * Use the last vpn.
	 */
	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	__tlbie(vpn, psize, actual_psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
}

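/*
 * Decode the LP field of a large-page HPTE: given a candidate base
 * page size, return the index of the actual page size whose penc
 * value matches, or -1 if none does.
 */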
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz	     >=8KB
		 *    rrrr rrzz	     >=16KB
		 *    rrrr rzzz	     >=32KB
		 *    rrrr zzzz	     >=64KB
		 *    .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

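/*
 * Reverse-map a raw HPTE back to its VPN. The AVPN field only keeps
 * the high-order VA bits, so the missing low bits are recovered from
 * the slot number: in effect hash = vsid ^ (seg_off >> shift) for 256M
 * segments, so the missing seg_off bits follow from (vsid ^ pteg).
 * Only used by native_hpte_clear() at kexec time.
 */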
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush; we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

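/*
 * Wire the native (bare-metal, i.e. no hypervisor) hash table
 * operations into the ppc_md machine-dependent ops vector.
 */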
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}