/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/processor.h>

#include <asm-generic/pgtable-nopud.h>

/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMALLOC_END		_AC(0x0000010000000000,UL)
#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)

#define vmemmap			((struct page *)VMEMMAP_BASE)
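
/* With the struct page array virtually mapped at VMEMMAP_BASE,
 * pfn_to_page()/page_to_pfn() reduce to pointer arithmetic against the
 * vmemmap pointer above.  Roughly (a sketch of the generic helpers, not
 * an additional definition):
 *
 *	struct page *page = vmemmap + pfn;
 *	unsigned long pfn = page - vmemmap;
 */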

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)

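/* For example, with the default 8K base page (PAGE_SHIFT == 13) these
 * work out to PMD_SHIFT == 23, PMD_BITS == 10, PGDIR_SHIFT == 33 and
 * PGDIR_BITS == 10: each level is one 8K page of 1024 eight-byte entries,
 * and the three levels together cover the 43-bit virtual address range
 * that the check below enforces.
 */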
#if (PGDIR_SHIFT + PGDIR_BITS) != 43
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

extern unsigned long sparc64_valid_addr_bitmap[];

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
static inline bool __kern_addr_valid(unsigned long paddr)
{
	if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
		return false;
	return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
}

static inline bool kern_addr_valid(unsigned long addr)
{
	unsigned long paddr = __pa(addr);

	return __kern_addr_valid(paddr);
}

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

/* Kernel has a separate 44-bit address space. */
#define FIRST_USER_ADDRESS	0

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE             */
#define _PAGE_R		  _AC(0x8000000000000000,UL) /* Keep ref bit up to date */
#define _PAGE_SPECIAL	  _AC(0x0200000000000000,UL) /* Special page          */
#define _PAGE_PMD_HUGE	  _AC(0x0100000000000000,UL) /* Huge page             */

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page              */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page             */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page              */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page               */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only         */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness     */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2  */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page          */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page             */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved              */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page   */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page  */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits         */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop       */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved              */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]   */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:        */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit     */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)      */
#define _PAGE_FILE_4U	  _AC(0x0000000000000800,UL) /* Pagecache page        */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)      */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit       */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit       */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present               */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE            */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache  */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache  */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect           */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page       */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable              */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only         */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2  */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)      */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)      */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit       */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit       */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page          */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page             */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]          */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness     */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect           */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache  */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache  */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page       */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page       */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable              */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits         */
#define _PAGE_FILE_4V	  _AC(0x0000000000000020,UL) /* Pagecache page        */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present               */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved              */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page             */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page              */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page            */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page             */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page              */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page             */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page              */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page               */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits         */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

extern unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers. However, mem_map only begins to record
 * per-page information starting at pfn_base. This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB. This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts. */
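/* A rough sketch of what the patched sequence below computes (an
 * explanatory approximation, not the generated code): on sun4u the
 * physical address lives in pte bits [42:13], so "sllx 21; srlx
 * 21 + PAGE_SHIFT" strips the high attribute bits and leaves the pfn;
 * on sun4v it lives in bits [55:13], so the shift pair becomes 8 and
 * 8 + PAGE_SHIFT.  The .sun4v_Ninsn_patch sections used throughout this
 * file record the sun4u instructions so that early boot code can
 * overwrite them with the sun4v variants when running on a hypervisor.
 */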
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx	%1, %2, %0\n"
	"	srlx	%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sllx	%1, %4, %0\n"
	"	srlx	%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi	%%uhi(%2), %1\n"
	"	sethi	%%hi(%2), %0\n"
	"\n662:	or	%1, %%ulo(%2), %1\n"
	"	or	%0, %%lo(%2), %0\n"
	"\n663:	sllx	%1, 32, %1\n"
	"	or	%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%3), %1\n"
	"	sethi	%%hi(%3), %0\n"
	"	.word	662b\n"
	"	or	%1, %%ulo(%3), %1\n"
	"	or	%0, %%lo(%3), %0\n"
	"	.word	663b\n"
	"	sllx	%1, 32, %1\n"
	"	or	%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pte_t pgoff_to_pte(unsigned long off)
{
	off <<= PAGE_SHIFT;

	__asm__ __volatile__(
	"\n661:	or	%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	or	%0, %3, %0\n"
	"	.previous\n"
	: "=r" (off)
	: "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return __pte(off);
}

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn	%0, %2, %0\n"
	"	or	%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	andn	%0, %4, %0\n"
	"	or	%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline pte_t pte_mkhuge(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi	%%uhi(%1), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	mov	%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return __pte(pte_val(pte) | mask);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or	%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%4), %1\n"
	"	sllx	%1, 32, %1\n"
	"	.word	662b\n"
	"	or	%1, %%lo(%4), %1\n"
	"	or	%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn	%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%4), %1\n"
	"	sllx	%1, 32, %1\n"
	"	.word	662b\n"
	"	or	%1, %%lo(%4), %1\n"
	"	andn	%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn	%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%4), %1\n"
	"	sllx	%1, 32, %1\n"
	"	.word	662b\n"
	"	or	%1, %%lo(%4), %1\n"
	"	andn	%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov	%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	sethi	%%uhi(%2), %0\n"
	"	sllx	%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi	%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	mov	%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_file(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and	%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	and	%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));

	return val;
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and	%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word	661b\n"
	"	and	%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_trans_splitting(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pmd_trans_huge(pmd) && pte_special(pte);
}

#define has_transparent_hugepage() 1

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkspecial(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address itself.
 */
#define pmd_bad(pmd)			((pmd_val(pmd) & ~PAGE_MASK) || \
					 !__kern_addr_valid(pmd_val(pmd)))

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			((pud_val(pud) & ~PAGE_MASK) || \
					 !__kern_addr_valid(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long __pmd_page(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page_vaddr(pud)		\
	((unsigned long) __va(pud_val(pud)))
#define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)

/* Same in both SUN4V and SUN4U. */
#define pte_none(pte)			(!pte_val(pte))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel		pte_index
#define pte_offset_map			pte_index
#define pte_unmap(pte)			do { } while (0)
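
/* A minimal sketch of a software walk using the helpers above (the pud
 * level is folded away by <asm-generic/pgtable-nopud.h>, so pud_offset()
 * is effectively a pass-through of the pgd entry):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */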

/* Actual page table PTE updates.  */
extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
			  pte_t *ptep, pte_t orig, int fullmm);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;

	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, addr, ptep, orig, fullmm);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];

extern void paging_init(void);
extern unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
extern void mmu_info(struct seq_file *);

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
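/* Layout sketch (an illustration of the macros below, not an additional
 * definition): the swap type occupies the eight bits just above PAGE_SHIFT
 * and the swap offset the bits above those, while the low PAGE_SHIFT bits
 * stay clear, so a swap pte never has the SUN4U or SUN4V present bits set.
 */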
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t)			\
	  {				\
		(((long)(type) << PAGE_SHIFT) |			\
		 ((long)(offset) << (PAGE_SHIFT + 8UL)))	\
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* File offset in PTE support. */
extern unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)
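
/* A non-linear file pte (see pgoff_to_pte() earlier in this file) is just
 * the page offset shifted up by PAGE_SHIFT with the patched _PAGE_FILE bit
 * ORed in; PTE_FILE_MAX_BITS above reflects that encoding, leaving the low
 * PAGE_SHIFT bits and the topmost bit out of the usable offset.
 */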

extern int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
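
/* For example (an illustrative sketch, not a driver requirement), a driver
 * mapping bus space at physical address "paddr" in I/O space "space" would
 * pass MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT) as the pfn argument to
 * io_remap_pfn_range(); the inline below then reassembles the full physical
 * address from the two fields before calling remap_pfn_range().
 */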

extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
			   unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

extern void pgtable_cache_init(void);
extern void sun4v_register_fault_status(void);
extern void sun4v_ktsb_register(void);
extern void __init cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

extern asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */