/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x10000000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMALLOC_END		_AC(0x0000010000000000,UL)
#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)

#define vmemmap			((struct page *)VMEMMAP_BASE)
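
/* With vmemmap pointing at a virtually mapped array of struct page, the
 * generic pfn_to_page()/page_to_pfn() reduce to pointer arithmetic,
 * roughly (illustrative, assuming the generic vmemmap implementation):
 *
 *	pfn_to_page(pfn)  ~= vmemmap + (pfn)
 *	page_to_pfn(page) ~= (page) - vmemmap
 */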

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif
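
/* Worked out for the 8K base page size (PAGE_SHIFT == 13, so each table
 * level holds 2^10 entries):
 *
 *	PMD_SHIFT   = 13 + 10 = 23	(8MB per PMD entry)
 *	PUD_SHIFT   = 23 + 10 = 33	(8GB per PUD entry)
 *	PGDIR_SHIFT = 33 + 10 = 43	(8TB per PGD entry)
 *
 * and PGDIR_SHIFT + PGDIR_BITS = 53, satisfying the check above.
 */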

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS	0

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE             */
#define _PAGE_R		  _AC(0x8000000000000000,UL) /* Keep ref bit up to date */
#define _PAGE_SPECIAL	  _AC(0x0200000000000000,UL) /* Special page          */
#define _PAGE_PMD_HUGE	  _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PUD_HUGE	  _PAGE_PMD_HUGE

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_FILE_4U	  _AC(0x0000000000000800,UL) /* Pagecache page       */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_FILE_4V	  _AC(0x0000000000000020,UL) /* Pagecache page       */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers. However, mem_map only begins to record
 * per-page information starting at pfn_base. This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB. This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
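
/* Typical use, sketched: a caller builds a PTE for a page and installs it,
 * e.g. pte = mk_pte(page, vma->vm_page_prot); followed by a set_pte_at()
 * (defined further below).
 */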

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts. */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
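
/* How the two shifts work (derived from the _PAGE_PADDR masks above): on
 * sun4u the physical address lives in pa[42:13], so "sllx 21" pushes
 * everything above bit 42 off the top and "srlx 21 + PAGE_SHIFT" drops the
 * low 13 bits, leaving the pfn.  The .sun4v_2insn_patch section lets boot
 * code rewrite the two instructions into the 8 / 8 + PAGE_SHIFT pair that
 * matches the wider sun4v paddr[55:13] field.
 */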

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pte_t pgoff_to_pte(unsigned long off)
{
	off <<= PAGE_SHIFT;

	__asm__ __volatile__(
	"\n661:	or		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	or		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (off)
	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return __pte(off);
}

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached". That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached
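
/* The usual driver idiom, sketched: clear the cacheable bits and set the
 * side-effect bit before mapping device memory, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */
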
369
David S. Millera7b94032013-09-26 13:45:15 -0700370#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700371static inline pte_t pte_mkhuge(pte_t pte)
372{
373 unsigned long mask;
374
375 __asm__ __volatile__(
376 "\n661: sethi %%uhi(%1), %0\n"
377 " sllx %0, 32, %0\n"
378 " .section .sun4v_2insn_patch, \"ax\"\n"
379 " .word 661b\n"
380 " mov %2, %0\n"
381 " nop\n"
382 " .previous\n"
383 : "=r" (mask)
384 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
385
386 return __pte(pte_val(pte) | mask);
387}
David S. Millera7b94032013-09-26 13:45:15 -0700388#ifdef CONFIG_TRANSPARENT_HUGEPAGE
389static inline pmd_t pmd_mkhuge(pmd_t pmd)
390{
391 pte_t pte = __pte(pmd_val(pmd));
392
393 pte = pte_mkhuge(pte);
394 pte_val(pte) |= _PAGE_PMD_HUGE;
395
396 return __pmd(pte_val(pte));
397}
398#endif
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700399#endif
400
401static inline pte_t pte_mkdirty(pte_t pte)
402{
403 unsigned long val = pte_val(pte), tmp;
404
405 __asm__ __volatile__(
406 "\n661: or %0, %3, %0\n"
407 " nop\n"
408 "\n662: nop\n"
409 " nop\n"
410 " .section .sun4v_2insn_patch, \"ax\"\n"
411 " .word 661b\n"
412 " sethi %%uhi(%4), %1\n"
413 " sllx %1, 32, %1\n"
414 " .word 662b\n"
415 " or %1, %%lo(%4), %1\n"
416 " or %0, %1, %0\n"
417 " .previous\n"
418 : "=r" (val), "=r" (tmp)
419 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
420 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
421
422 return __pte(val);
423}
424
425static inline pte_t pte_mkclean(pte_t pte)
426{
427 unsigned long val = pte_val(pte), tmp;
428
429 __asm__ __volatile__(
430 "\n661: andn %0, %3, %0\n"
431 " nop\n"
432 "\n662: nop\n"
433 " nop\n"
434 " .section .sun4v_2insn_patch, \"ax\"\n"
435 " .word 661b\n"
436 " sethi %%uhi(%4), %1\n"
437 " sllx %1, 32, %1\n"
438 " .word 662b\n"
439 " or %1, %%lo(%4), %1\n"
440 " andn %0, %1, %0\n"
441 " .previous\n"
442 : "=r" (val), "=r" (tmp)
443 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
444 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
445
446 return __pte(val);
447}
448
449static inline pte_t pte_mkwrite(pte_t pte)
450{
451 unsigned long val = pte_val(pte), mask;
452
453 __asm__ __volatile__(
454 "\n661: mov %1, %0\n"
455 " nop\n"
456 " .section .sun4v_2insn_patch, \"ax\"\n"
457 " .word 661b\n"
458 " sethi %%uhi(%2), %0\n"
459 " sllx %0, 32, %0\n"
460 " .previous\n"
461 : "=r" (mask)
462 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
463
464 return __pte(val | mask);
465}
466
467static inline pte_t pte_wrprotect(pte_t pte)
468{
469 unsigned long val = pte_val(pte), tmp;
470
471 __asm__ __volatile__(
472 "\n661: andn %0, %3, %0\n"
473 " nop\n"
474 "\n662: nop\n"
475 " nop\n"
476 " .section .sun4v_2insn_patch, \"ax\"\n"
477 " .word 661b\n"
478 " sethi %%uhi(%4), %1\n"
479 " sllx %1, 32, %1\n"
480 " .word 662b\n"
481 " or %1, %%lo(%4), %1\n"
482 " andn %0, %1, %0\n"
483 " .previous\n"
484 : "=r" (val), "=r" (tmp)
485 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
486 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
487
488 return __pte(val);
489}
490
491static inline pte_t pte_mkold(pte_t pte)
492{
493 unsigned long mask;
494
495 __asm__ __volatile__(
496 "\n661: mov %1, %0\n"
497 " nop\n"
498 " .section .sun4v_2insn_patch, \"ax\"\n"
499 " .word 661b\n"
500 " sethi %%uhi(%2), %0\n"
501 " sllx %0, 32, %0\n"
502 " .previous\n"
503 : "=r" (mask)
504 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
505
506 mask |= _PAGE_R;
507
508 return __pte(pte_val(pte) & ~mask);
509}
510
511static inline pte_t pte_mkyoung(pte_t pte)
512{
513 unsigned long mask;
514
515 __asm__ __volatile__(
516 "\n661: mov %1, %0\n"
517 " nop\n"
518 " .section .sun4v_2insn_patch, \"ax\"\n"
519 " .word 661b\n"
520 " sethi %%uhi(%2), %0\n"
521 " sllx %0, 32, %0\n"
522 " .previous\n"
523 : "=r" (mask)
524 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
525
526 mask |= _PAGE_R;
527
528 return __pte(pte_val(pte) | mask);
529}
530
531static inline pte_t pte_mkspecial(pte_t pte)
532{
David S. Miller683d2fa2011-07-25 17:12:21 -0700533 pte_val(pte) |= _PAGE_SPECIAL;
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700534 return pte;
535}
536
537static inline unsigned long pte_young(pte_t pte)
538{
539 unsigned long mask;
540
541 __asm__ __volatile__(
542 "\n661: mov %1, %0\n"
543 " nop\n"
544 " .section .sun4v_2insn_patch, \"ax\"\n"
545 " .word 661b\n"
546 " sethi %%uhi(%2), %0\n"
547 " sllx %0, 32, %0\n"
548 " .previous\n"
549 : "=r" (mask)
550 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
551
552 return (pte_val(pte) & mask);
553}
554
555static inline unsigned long pte_dirty(pte_t pte)
556{
557 unsigned long mask;
558
559 __asm__ __volatile__(
560 "\n661: mov %1, %0\n"
561 " nop\n"
562 " .section .sun4v_2insn_patch, \"ax\"\n"
563 " .word 661b\n"
564 " sethi %%uhi(%2), %0\n"
565 " sllx %0, 32, %0\n"
566 " .previous\n"
567 : "=r" (mask)
568 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
569
570 return (pte_val(pte) & mask);
571}
572
573static inline unsigned long pte_write(pte_t pte)
574{
575 unsigned long mask;
576
577 __asm__ __volatile__(
578 "\n661: mov %1, %0\n"
579 " nop\n"
580 " .section .sun4v_2insn_patch, \"ax\"\n"
581 " .word 661b\n"
582 " sethi %%uhi(%2), %0\n"
583 " sllx %0, 32, %0\n"
584 " .previous\n"
585 : "=r" (mask)
586 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
587
588 return (pte_val(pte) & mask);
589}
590
591static inline unsigned long pte_exec(pte_t pte)
592{
593 unsigned long mask;
594
595 __asm__ __volatile__(
596 "\n661: sethi %%hi(%1), %0\n"
597 " .section .sun4v_1insn_patch, \"ax\"\n"
598 " .word 661b\n"
599 " mov %2, %0\n"
600 " .previous\n"
601 : "=r" (mask)
602 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
603
604 return (pte_val(pte) & mask);
605}
606
607static inline unsigned long pte_file(pte_t pte)
608{
609 unsigned long val = pte_val(pte);
610
611 __asm__ __volatile__(
612 "\n661: and %0, %2, %0\n"
613 " .section .sun4v_1insn_patch, \"ax\"\n"
614 " .word 661b\n"
615 " and %0, %3, %0\n"
616 " .previous\n"
617 : "=r" (val)
618 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
619
620 return val;
621}
622
623static inline unsigned long pte_present(pte_t pte)
624{
625 unsigned long val = pte_val(pte);
626
627 __asm__ __volatile__(
628 "\n661: and %0, %2, %0\n"
629 " .section .sun4v_1insn_patch, \"ax\"\n"
630 " .word 661b\n"
631 " and %0, %3, %0\n"
632 " .previous\n"
633 : "=r" (val)
634 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
635
636 return val;
637}
638
David S. Miller4a9d1942012-12-18 16:06:16 -0800639#define pte_accessible pte_accessible
Rik van Riel20841402013-12-18 17:08:44 -0800640static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
David S. Miller4a9d1942012-12-18 16:06:16 -0800641{
642 return pte_val(a) & _PAGE_VALID;
643}
644
David S. Miller683d2fa2011-07-25 17:12:21 -0700645static inline unsigned long pte_special(pte_t pte)
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700646{
David S. Miller683d2fa2011-07-25 17:12:21 -0700647 return pte_val(pte) & _PAGE_SPECIAL;
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700648}
649
David S. Millera7b94032013-09-26 13:45:15 -0700650static inline unsigned long pmd_large(pmd_t pmd)
David S. Miller89a77912013-02-13 12:21:06 -0800651{
David S. Millera7b94032013-09-26 13:45:15 -0700652 pte_t pte = __pte(pmd_val(pmd));
653
David S. Miller04df4192014-04-25 10:21:12 -0700654 return pte_val(pte) & _PAGE_PMD_HUGE;
David S. Miller89a77912013-02-13 12:21:06 -0800655}
656
David S. Miller0dd5b7b2014-09-24 20:56:11 -0700657static inline unsigned long pmd_pfn(pmd_t pmd)
658{
659 pte_t pte = __pte(pmd_val(pmd));
660
661 return pte_pfn(pte);
662}
663
David Miller9e695d22012-10-08 16:34:29 -0700664#ifdef CONFIG_TRANSPARENT_HUGEPAGE
David S. Millera7b94032013-09-26 13:45:15 -0700665static inline unsigned long pmd_young(pmd_t pmd)
David Miller9e695d22012-10-08 16:34:29 -0700666{
David S. Millera7b94032013-09-26 13:45:15 -0700667 pte_t pte = __pte(pmd_val(pmd));
668
669 return pte_young(pte);
David Miller9e695d22012-10-08 16:34:29 -0700670}
671
David S. Millera7b94032013-09-26 13:45:15 -0700672static inline unsigned long pmd_write(pmd_t pmd)
David Miller9e695d22012-10-08 16:34:29 -0700673{
David S. Millera7b94032013-09-26 13:45:15 -0700674 pte_t pte = __pte(pmd_val(pmd));
675
676 return pte_write(pte);
David Miller9e695d22012-10-08 16:34:29 -0700677}
678
David S. Millera7b94032013-09-26 13:45:15 -0700679static inline unsigned long pmd_trans_huge(pmd_t pmd)
David Miller9e695d22012-10-08 16:34:29 -0700680{
David S. Millera7b94032013-09-26 13:45:15 -0700681 pte_t pte = __pte(pmd_val(pmd));
682
683 return pte_val(pte) & _PAGE_PMD_HUGE;
David Miller9e695d22012-10-08 16:34:29 -0700684}
685
David S. Millera7b94032013-09-26 13:45:15 -0700686static inline unsigned long pmd_trans_splitting(pmd_t pmd)
David Miller9e695d22012-10-08 16:34:29 -0700687{
David S. Millera7b94032013-09-26 13:45:15 -0700688 pte_t pte = __pte(pmd_val(pmd));
689
690 return pmd_trans_huge(pmd) && pte_special(pte);
David Miller9e695d22012-10-08 16:34:29 -0700691}
692
693#define has_transparent_hugepage() 1
694
695static inline pmd_t pmd_mkold(pmd_t pmd)
696{
David S. Millera7b94032013-09-26 13:45:15 -0700697 pte_t pte = __pte(pmd_val(pmd));
698
699 pte = pte_mkold(pte);
700
701 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700702}
703
704static inline pmd_t pmd_wrprotect(pmd_t pmd)
705{
David S. Millera7b94032013-09-26 13:45:15 -0700706 pte_t pte = __pte(pmd_val(pmd));
707
708 pte = pte_wrprotect(pte);
709
710 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700711}
712
713static inline pmd_t pmd_mkdirty(pmd_t pmd)
714{
David S. Millera7b94032013-09-26 13:45:15 -0700715 pte_t pte = __pte(pmd_val(pmd));
716
717 pte = pte_mkdirty(pte);
718
719 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700720}
721
722static inline pmd_t pmd_mkyoung(pmd_t pmd)
723{
David S. Millera7b94032013-09-26 13:45:15 -0700724 pte_t pte = __pte(pmd_val(pmd));
725
726 pte = pte_mkyoung(pte);
727
728 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700729}
730
731static inline pmd_t pmd_mkwrite(pmd_t pmd)
732{
David S. Millera7b94032013-09-26 13:45:15 -0700733 pte_t pte = __pte(pmd_val(pmd));
734
735 pte = pte_mkwrite(pte);
736
737 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700738}
739
David Miller9e695d22012-10-08 16:34:29 -0700740static inline pmd_t pmd_mksplitting(pmd_t pmd)
741{
David S. Millera7b94032013-09-26 13:45:15 -0700742 pte_t pte = __pte(pmd_val(pmd));
743
744 pte = pte_mkspecial(pte);
745
746 return __pmd(pte_val(pte));
David Miller9e695d22012-10-08 16:34:29 -0700747}
748
David S. Millera7b94032013-09-26 13:45:15 -0700749static inline pgprot_t pmd_pgprot(pmd_t entry)
750{
751 unsigned long val = pmd_val(entry);
752
753 return __pgprot(val);
754}
David Miller9e695d22012-10-08 16:34:29 -0700755#endif
756
757static inline int pmd_present(pmd_t pmd)
758{
David S. Miller2b779332013-09-25 14:33:16 -0700759 return pmd_val(pmd) != 0UL;
David Miller9e695d22012-10-08 16:34:29 -0700760}
761
762#define pmd_none(pmd) (!pmd_val(pmd))
763
/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
 * very simple, it's just the physical address. PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well. We also validate the physical address
 * itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define pgd_none(pgd)			(!pgd_val(pgd))

#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long __pmd_page(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud)		\
	((unsigned long) __va(pud_val(pud)))
#define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pgd_page_vaddr(pgd)		\
	((unsigned long) __va(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)

static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U. */
#define pte_none(pte)			(!pte_val(pte))

#define pgd_set(pgdp, pudp)	\
	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level (pud) page table.. */
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgdp, address)	\
	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))

/* Find an entry in the third-level (pmd) page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the fourth-level (pte) page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel	pte_index
#define pte_offset_map		pte_index
#define pte_unmap(pte)		do { } while (0)

/* Actual page table PTE updates. */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 * and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
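
/* A rough sketch of the flow: set_pte_at() below expands to
 * __set_pte_at(..., fullmm = 0), so a normal user PTE update stores the
 * new value and then queues the previous translation with tlb_batch_add(),
 * deferring the cross-CPU TLB flush until the batch is drained.
 */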

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif
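
/* Why bit 13, roughly: DCACHE_ALIASING_POSSIBLE is set when the
 * virtually-indexed D-cache spans more than one 8K page, so two mappings
 * of the same page that differ in address bit 13 can land in different
 * cache colors and alias.  move_pte() therefore flushes the page whenever
 * a remap would change that color, except on sun4v hypervisor systems.
 */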

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) |	\
		 ((long)(offset) << (PAGE_SHIFT + 8UL)))	\
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
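
/* Worked example, assuming the 8K base page (PAGE_SHIFT == 13): the swap
 * type occupies bits [20:13] and the offset starts at bit 21, so
 * __swp_entry(2, 0x30) yields 0x6004000, from which __swp_type() recovers
 * 2 and __swp_offset() recovers 0x30.
 */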

/* File offset in PTE support. */
unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
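
/* For instance (illustrative values): MK_IOSPACE_PFN(1, 0x1000) packs
 * iospace 1 into the top nibble, giving 0x1000000000001000; GET_IOSPACE()
 * and GET_PFN() then recover 1 and 0x1000, and io_remap_pfn_range() above
 * rebuilds the physical base as (0x1000 << PAGE_SHIFT) | (1UL << 32).
 */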

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */