/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END
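
/* Editorial note (not in the original header): MODULES_LEN is simply
 * MODULES_END - MODULES_VADDR (0xe0000000), so modules occupy everything
 * between 0x10000000 and the OBP region at 0xf0000000 and therefore stay
 * within the low 32 bits.  VMEMMAP_BASE follows the VMALLOC_END variable
 * declared further below, which is sized at boot.
 */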

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)
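
/* Worked example (editorial note), assuming the usual 8K kernel pages
 * (PAGE_SHIFT == 13): PMD_SHIFT = 13 + 10 = 23, so one PMD entry maps
 * PMD_SIZE = 8MB, and a PMD table holds 2^PMD_BITS = 1024 entries.
 */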

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)
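
/* Continuing the 8K-page example above (editorial note): PUD_SHIFT =
 * 23 + 10 = 33 (each PUD entry maps 8GB) and PGDIR_SHIFT = 33 + 10 = 43
 * (each PGD entry maps 8TB), so PGDIR_SHIFT + PGDIR_BITS covers 53 bits
 * of virtual address, which is exactly what the check below demands.
 */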

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap			((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS	0UL

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
#define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
#define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
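
/* Editorial note: since PMD_SHIFT == HPAGE_SHIFT == 23, a huge page is 8MB,
 * while REAL_HPAGE_SHIFT == 22 means the hardware TLB entries are 4MB; each
 * 8MB huge page is therefore backed by two 4MB TTEs, which is why the
 * _PAGE_SZHUGE_* encodings above select the 4MB size bits on both sun4u
 * and sun4v.
 */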

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.   This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts. */
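/* Editorial note on the inline assembly used below: the instructions at
 * label 661 are the sun4u versions; the .sun4v_1insn_patch/.sun4v_2insn_patch
 * sections record their address plus replacement instructions, and the boot
 * code patches in the sun4v variants when running under the hypervisor, so
 * each helper handles both TTE formats without a runtime branch.  For
 * pte_pfn() itself: on sun4u, sllx by 21 discards everything above pa[42]
 * and srlx by 21 + PAGE_SHIFT drops the low bits, leaving the pfn; on sun4v
 * the shifts are 8 and 8 + PAGE_SHIFT because the physical address extends
 * up to bit 55.
 */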
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline pte_t pte_mkhuge(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return __pte(pte_val(pte) | mask);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_trans_splitting(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pmd_trans_huge(pmd) && pte_special(pte);
}

#define has_transparent_hugepage() 1

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkspecial(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define pgd_none(pgd)			(!pgd_val(pgd))

#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long __pmd_page(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud)		\
	((unsigned long) __va(pud_val(pud)))
#define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pgd_page_vaddr(pgd)		\
	((unsigned long) __va(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)

static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U.  */
#define pte_none(pte)			(!pte_val(pte))

#define pgd_set(pgdp, pudp)	\
	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the third-level page table.. */
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgdp, address)	\
	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) +	\
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel		pte_index
#define pte_offset_map			pte_index
#define pte_unmap(pte)			do { } while (0)
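
/* Editorial sketch (not part of the original header): how the lookup macros
 * above compose into a full software walk for a kernel address.  The function
 * name is made up for illustration and error handling is minimal.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}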

/* Actual page table PTE updates.  */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) |				\
		 ((long)(offset) << (PAGE_SHIFT + 8UL)))		\
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
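
/* Editorial note: with 8K pages this encoding places the 8-bit swap type in
 * bits [PAGE_SHIFT + 7 : PAGE_SHIFT] (13..20) and the swap offset in the
 * bits above that; the low PAGE_SHIFT bits, where the sun4u and sun4v
 * present bits live, stay clear.
 */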

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */