#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *	PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

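/*
 * Illustrative sketch (mirroring how the SLB code builds bolted entries;
 * not a definition from this file): the VSID dword of an SLB entry
 * combines the scrambled VSID, the segment size and the flags above,
 * e.g. for a kernel segment:
 *
 *	vsid_data = (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) |
 *		    SLB_VSID_KERNEL | mmu_psize_defs[psize].sllp |
 *		    ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
 */
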
#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

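/*
 * Illustrative sketch (modelled on the TLB flush helpers elsewhere in
 * the tree; not a definition from this file): a full tlbiel flush walks
 * every congruence-class set, encoding the selector and set number into
 * the RB operand:
 *
 *	unsigned long i, rb = TLBIEL_INVAL_SET;
 *	for (i = 0; i < POWER7_TLB_SETS; ++i) {
 *		asm volatile("tlbiel %0" : : "r" (rb));
 *		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
 *	}
 */
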
#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

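/*
 * Example (illustrative): on a kernel configured for 64K pages,
 * shift_to_mmu_psize(16) returns MMU_PAGE_64K, and
 * mmu_psize_to_shift(MMU_PAGE_64K) returns 16. An unsupported shift
 * yields -1; an unsupported psize hits BUG().
 */
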
static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

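/*
 * Illustrative note: this folds the SLB L and LP bits into the compact
 * 3-bit L||LP form used when encoding the actual page size for
 * tlbie(l), e.g. for a 64K page (sllp = SLB_VSID_L | SLB_VSID_LP_01 =
 * 0x110) it returns 0x5 (binary 101).
 */
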
#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoding of the page number shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the hashtable hash we can ignore the page size bits of the VA,
 * and for HPTE encoding we ignore up to 23 bits of it. So ignoring the
 * lower 12 bits works in all cases, including the 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

David Gibson | 8d2169e | 2007-04-27 11:53:52 +1000 | [diff] [blame] | 247 | /* |
Paul Mackerras | 0eeede0 | 2016-09-02 17:20:43 +1000 | [diff] [blame] | 248 | * This array is indexed by the LP field of the HPTE second dword. |
| 249 | * Since this field may contain some RPN bits, some entries are |
| 250 | * replicated so that we get the same value irrespective of RPN. |
| 251 | * The top 4 bits are the page size index (MMU_PAGE_*) for the |
| 252 | * actual page size, the bottom 4 bits are the base page size. |
| 253 | */ |
| 254 | extern u8 hpte_page_sizes[1 << LP_BITS]; |
| 255 | |
| 256 | static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, |
| 257 | bool is_base_size) |
| 258 | { |
| 259 | unsigned int i, lp; |
| 260 | |
| 261 | if (!(h & HPTE_V_LARGE)) |
| 262 | return 1ul << 12; |
| 263 | |
| 264 | /* Look at the 8 bit LP value */ |
| 265 | lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); |
| 266 | i = hpte_page_sizes[lp]; |
| 267 | if (!i) |
| 268 | return 0; |
| 269 | if (!is_base_size) |
| 270 | i >>= 4; |
| 271 | return 1ul << mmu_psize_defs[i & 0xf].shift; |
| 272 | } |
| 273 | |
| 274 | static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) |
| 275 | { |
| 276 | return __hpte_page_size(h, l, 0); |
| 277 | } |
| 278 | |
| 279 | static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) |
| 280 | { |
| 281 | return __hpte_page_size(h, l, 1); |
| 282 | } |
| 283 | |
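/*
 * Example (illustrative): an HPTE with HPTE_V_LARGE clear decodes as a
 * 4K base and actual page size, so both helpers return 1ul << 12. For a
 * large-page HPTE the 8-bit LP value indexes hpte_page_sizes[], whose
 * nibbles give the actual (top) and base (bottom) MMU_PAGE_* indices.
 */
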
/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}

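/*
 * Illustrative sketch (following the pattern hash MMU back-ends use
 * when inserting an HPTE; not a definition from this file):
 *
 *	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags |
 *		 HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
 */
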
/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

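/*
 * Worked example (illustrative): for a 256M segment, s_shift is
 * SID_SHIFT (28), so the VPN is (vsid << 16) | ((ea >> 12) & 0xffff),
 * i.e. the VSID followed by the 4K-page index within the segment.
 */
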
/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}

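/*
 * Example (illustrative): callers typically turn the hash into a PTEG
 * slot, e.g.
 *
 *	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 */
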
#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as
 * below. NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because
 * that would give a proto-VSID of 0x1fffffffff, which will result in a
 * VSID 0 because of the modulo operation in the vsid scramble. But the
 * vmemmap (which is what uses region 0xf) will never be close to 64TB
 * in size (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

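/*
 * Illustrative note: the branch above reduces x modulo 2^b - 1 without
 * a divide. Since 2^b == 1 (mod 2^b - 1), the high bits fold into the
 * low bits as (x >> b) + (x & (2^b - 1)); the final
 * (x + ((x+1) >> b)) step handles the one remaining case where the sum
 * still reaches 2^b - 1, as explained in ASM_VSID_SCRAMBLE above.
 */
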
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map address as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}

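/*
 * Worked example (illustrative): for ea = 0xc000000000000000,
 * (ea >> 60) - 0xc is 0, so context = MAX_USER_CONTEXT + 1 = 0x7fffc,
 * matching the table above; region 0xf likewise maps to 0x7ffff.
 */
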
unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */