/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
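/*
 * On CPUs where the data cache can alias, the zero page is replicated
 * once per cache colour and zero_page_mask spans the whole coloured
 * area, so ZERO_PAGE(vaddr) returns the copy whose colour matches the
 * faulting virtual address.
 */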

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)
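
/*
 * htw_stop()/htw_start() bracket page table updates: they disable and
 * re-enable the hardware page table walker (the PWEn bit in the CP0
 * PWCtl register) so the walker never sees a half-updated entry.  The
 * per-CPU htw_seq count makes the pair nestable; only the outermost
 * htw_stop()/htw_start() actually toggles the walker:
 *
 *	htw_stop();
 *	... update one or more PTEs ...
 *	htw_start();
 */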

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

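/*
 * A MIPS TLB entry maps an even/odd pair of pages and the hardware only
 * treats the entry as global when both EntryLo halves have the G bit
 * set, so set_pte() and pte_clear() keep _PAGE_GLOBAL in sync with the
 * buddy PTE (ptep_buddy()) rather than let a global mapping be lost.
 */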
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
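/*
 * Note that a pte carrying only _PAGE_GLOBAL still counts as "none":
 * pte_clear() deliberately leaves the global bit behind so that the
 * buddy of a global kernel mapping stays global.
 */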

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
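/*
 * log2 of the page table entry sizes, so that code which walks the
 * tables (such as the generated TLB refill handlers) can turn an index
 * into a byte offset with a shift instead of a multiply.
 */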

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#ifdef CONFIG_CPU_MIPSR2
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
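/*
 * The "special" pte bit is not implemented here, so pte_special() is
 * always false and pte_mkspecial() is a no-op.
 */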

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
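/*
 * Typical driver-side use of the helpers above, sketched for
 * illustration only: adjust the vma's protection before remapping
 * device or frame buffer memory, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	err = remap_pfn_range(vma, vma->vm_start, pfn,
 *			      vma->vm_end - vma->vm_start,
 *			      vma->vm_page_prot);
 *
 * pgprot_writecombine() works the same way but selects the CPU's
 * write-combining cache attribute instead of fully uncached.
 */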

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
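/*
 * update_mmu_cache() is called by the core VM once a pte for a user
 * address has been installed or changed: __update_tlb() refreshes any
 * TLB entry for that address and __update_cache() gives the cache code
 * a chance to deal with virtual aliasing.
 */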

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

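/*
 * With 64-bit physical addresses the target of an I/O mapping may lie
 * above 4GB; fixup_bigphys_addr() gives the platform a chance to
 * translate the address before it is handed to remap_pfn_range() below.
 */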
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}
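/*
 * _PAGE_SPLITTING marks a huge pmd that is being split back into
 * ordinary ptes; pmd_trans_splitting() lets callers detect that state
 * and avoid operating on a half-split mapping.
 */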

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#ifdef CONFIG_CPU_MIPSR2
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear() uses a version of
 * pmd_clear() with a different prototype, so provide our own.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */