/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/processor.h>

#include <asm-generic/pgtable-nopud.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The TSB is mapped in the 0x8000000 to 0xa000000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x10000000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x200000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_BASE		_AC(0x0000000008000000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMALLOC_END		_AC(0x0000010000000000,UL)
#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)

#define vmemmap			((struct page *)VMEMMAP_BASE)

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 2)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 2)

#if (PGDIR_SHIFT + PGDIR_BITS) != 44
#error Page table parameters do not cover virtual address space properly.
#endif
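
/* Worked example with the 8K base pages used on sparc64
 * (PAGE_SHIFT == 13):
 *
 *	PMD_SHIFT   = 13 + 9      = 22	(each pmd maps 4MB)
 *	PMD_BITS    = 13 - 2      = 11	(2048 pmds per pgd entry)
 *	PGDIR_SHIFT = 13 + 9 + 11 = 33	(each pgd entry maps 8GB)
 *	PGDIR_BITS  = 13 - 2      = 11	(2048 pgd entries)
 *
 * PGDIR_SHIFT + PGDIR_BITS = 44, exactly covering the 44-bit virtual
 * address space that the check above insists on.
 */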

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

/* PMDs point to PTE tables which are 4K aligned. */
#define PMD_PADDR	_AC(0xfffffffe,UL)
#define PMD_PADDR_SHIFT	_AC(11,UL)

#define PMD_ISHUGE	_AC(0x00000001,UL)

/* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
 * pages, this frees up a bunch of bits in the layout that we can
 * use for the protection settings and software metadata.
 */
#define PMD_HUGE_PADDR		_AC(0xfffff800,UL)
#define PMD_HUGE_PROTBITS	_AC(0x000007ff,UL)
#define PMD_HUGE_PRESENT	_AC(0x00000400,UL)
#define PMD_HUGE_WRITE		_AC(0x00000200,UL)
#define PMD_HUGE_DIRTY		_AC(0x00000100,UL)
#define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
#define PMD_HUGE_EXEC		_AC(0x00000040,UL)
#define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)

/* PGDs point to PMD tables which are 8K aligned. */
#define PGD_PADDR	_AC(0xfffffffc,UL)
#define PGD_PADDR_SHIFT	_AC(11,UL)

#ifndef __ASSEMBLY__

#include <linux/sched.h>

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-4))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

/* Kernel has a separate 44-bit address space. */
#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format.  */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE              */
#define _PAGE_R		  _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/
#define _PAGE_SPECIAL	  _AC(0x0200000000000000,UL) /* Special page           */

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page               */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page              */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page               */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page                */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only          */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness      */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2   */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page           */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved               */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page    */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page   */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits          */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop        */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved               */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]    */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:         */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit      */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)       */
#define _PAGE_FILE_4U	  _AC(0x0000000000000800,UL) /* Pagecache page         */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)       */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit        */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit        */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present                */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE             */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache   */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache   */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect            */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page        */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable               */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only          */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2   */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)       */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)       */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit        */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit        */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page           */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]           */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness      */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect            */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache   */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache   */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page        */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page        */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable               */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits          */
#define _PAGE_FILE_4V	  _AC(0x0000000000000020,UL) /* Pagecache page         */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present                */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved               */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page              */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page               */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page             */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page              */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page               */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page              */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page               */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page                */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits          */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

extern unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.  This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot);
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/* Do nothing, mk_pmd() does this part.  */
	return pmd;
}
#endif

/* This one can be done with two shifts.  */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
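
/* The patching scheme visible above recurs throughout this file: the
 * instructions at local label 661 are the sun4u versions, and each
 * entry in the .sun4v_2insn_patch (or .sun4v_1insn_patch) section pairs
 * that address with replacement instructions that boot code copies over
 * them when running on a sun4v hypervisor.  For pte_pfn() the shift
 * pairs fall out of the paddr field positions: sun4u keeps pa[42:13],
 * so sllx by 21 pushes bit 42 up to bit 63 (discarding the attribute
 * bits above it) and srlx by 21 + PAGE_SHIFT leaves just the pfn;
 * sun4v keeps pa[55:13], hence the 8-bit shift pair.
 */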

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
	 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
	       _PAGE_SPECIAL),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
	       _PAGE_SPECIAL));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

static inline pte_t pgoff_to_pte(unsigned long off)
{
	off <<= PAGE_SHIFT;

	__asm__ __volatile__(
	"\n661:	or		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	or		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (off)
	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return __pte(off);
}

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return __pte(pte_val(pte) | mask);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_file(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return val;
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
		(PMD_ISHUGE | PMD_HUGE_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_young(pmd_t pmd)
{
	return pmd_val(pmd) & PMD_HUGE_ACCESSED;
}

static inline int pmd_write(pmd_t pmd)
{
	return pmd_val(pmd) & PMD_HUGE_WRITE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR;

	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
		(PMD_ISHUGE|PMD_HUGE_SPLITTING);
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & PMD_ISHUGE;
}

#define has_transparent_hugepage() 1

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_HUGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_HUGE_WRITE;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_HUGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_HUGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_HUGE_WRITE;
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_HUGE_PRESENT;
	return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_HUGE_SPLITTING;
	return pmd;
}

extern pgprot_t pmd_pgprot(pmd_t entry);
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0U;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT;

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT))
static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long paddr = (unsigned long) pmd_val(pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_val(pmd) & PMD_ISHUGE)
		paddr &= PMD_HUGE_PADDR;
#endif
	paddr <<= PMD_PADDR_SHIFT;
	return ((unsigned long) __va(paddr));
}
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud)		\
	((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT)))
#define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
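
/* A note on the encoding used by pmd_set()/pud_set() above: pmd and pud
 * entries are only 32 bits wide, so the pointer to the next-level table
 * is stored as its physical address shifted right by 11.  Because PTE
 * tables are 4K aligned, bit 0 of a pmd entry is always zero after the
 * shift, leaving it free for PMD_ISHUGE; the 8K alignment of PMD tables
 * likewise frees the low two bits of a pud entry (hence the 0xfffffffe
 * and 0xfffffffc masks earlier in this file).  __pmd_page() and
 * pud_page_vaddr() reverse the encoding.
 */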
#define pmd_bad(pmd)			(0)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(0)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)

/* Same in both SUN4V and SUN4U.  */
#define pte_none(pte)			(!pte_val(pte))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel		pte_index
#define pte_offset_map			pte_index
#define pte_unmap(pte)			do { } while (0)
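
/* A minimal sketch of a software walk using the helpers above
 * (illustrative only; the pud level is folded away by
 * asm-generic/pgtable-nopud.h, and real walkers must also take the
 * appropriate page table locks):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */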

/* Actual page table PTE updates.  */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig, int fullmm);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0U));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(orig))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[2048];
extern pmd_t swapper_low_pmd_dir[2048];

extern void paging_init(void);
extern unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
extern void mmu_info(struct seq_file *);

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) |			\
		 ((long)(offset) << (PAGE_SHIFT + 8UL)))	\
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
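
/* With 8K pages (PAGE_SHIFT == 13) a swap entry packs as:
 *
 *	bits [12: 0]	zero
 *	bits [20:13]	swap type (8 bits)
 *	bits [63:21]	swap offset
 *
 * e.g. __swp_entry(2, 0x30) yields the value 0x0000000006004000.
 */
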
/* File offset in PTE support.  */
extern unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
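
/* pgoff_to_pte() stores the page offset shifted up by PAGE_SHIFT and ORs
 * in the (boot-patched) _PAGE_FILE bit; PTE_FILE_MAX_BITS gives up one
 * offset bit so that bit 63 (_PAGE_VALID) can never be set by a file pte.
 */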

extern unsigned long sparc64_valid_addr_bitmap[];

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
static inline bool kern_addr_valid(unsigned long addr)
{
	unsigned long paddr = __pa(addr);

	if ((paddr >> 41UL) != 0UL)
		return false;
	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
}
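
/* Each bit in sparc64_valid_addr_bitmap covers one 4MB (1 << 22) chunk
 * of physical memory, and the (paddr >> 41) test bounds the lookup to
 * the 2^41 bytes of physical address space the bitmap describes.
 */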

extern int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
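
/* Example round trip with BITS_PER_LONG == 64: MK_IOSPACE_PFN(2, 0x1000)
 * puts the iospace in the top nibble, giving 0x2000000000001000;
 * GET_IOSPACE() then recovers 2 and GET_PFN() recovers 0x1000.
 */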

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

extern void pgtable_cache_init(void);
extern void sun4v_register_fault_status(void);
extern void sun4v_ktsb_register(void);
extern void __init cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

extern asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */