/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
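
/*
 * Note (explanatory, not from the original file): the __Pxxx/__Sxxx values
 * above are the protection_map[] entries consumed by the generic mmap code;
 * the index bits encode read/write/execute and private vs. shared, e.g.
 * __P011 is a private readable+writable mapping and therefore copy-on-write.
 * The real values depend on the cacheability default of the running CPU, so
 * they are filled in at runtime as the comment above says.
 */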

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
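
/*
 * Illustrative note: on CPUs whose data caches can alias virtually,
 * empty_zero_page spans one page per cache colour and zero_page_mask
 * covers the colour bits, so ZERO_PAGE(vaddr) returns the zero page
 * whose colour matches the given virtual address, e.g. a read fault
 * on untouched anonymous memory can be serviced with ZERO_PAGE(address)
 * without risking cache aliasing.
 */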

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)
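
/*
 * Note on the pair above: htw_seq is a per-CPU nesting count, so
 * htw_stop()/htw_start() sections may nest. The hardware page table
 * walker (the PWEn bit in CP0 PWCtl) is disabled only by the outermost
 * htw_stop() and re-enabled only when the count returns to zero, with
 * interrupts disabled around the update so the count and the control
 * register cannot get out of step.
 */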

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

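/*
 * In this configuration a PTE is split across pte_low/pte_high.
 * set_pte() below intentionally stores pte_high first and pte_low
 * (which carries _PAGE_PRESENT) last, separated by smp_wmb(), so a
 * concurrent observer never sees a present entry paired with a stale
 * high half. It also propagates _PAGE_GLOBAL to the buddy entry:
 * a MIPS TLB entry maps an even/odd pair of pages, and both halves
 * of the pair must agree on the global bit.
 */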
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		if (kernel_uses_llsc && R10000_LLSC_WAR) {
			__asm__ __volatile__ (
			"	.set	arch=r4000			\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqzl	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		} else if (kernel_uses_llsc) {
			__asm__ __volatile__ (
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	.set	push				\n"
			"	.set	noreorder			\n"
			"1:"	__LL	"%[tmp], %[buddy]		\n"
			"	bnez	%[tmp], 2f			\n"
			"	 or	%[tmp], %[tmp], %[global]	\n"
				__SC	"%[tmp], %[buddy]		\n"
			"	beqz	%[tmp], 1b			\n"
			"	nop					\n"
			"2:						\n"
			"	.set	pop				\n"
			"	.set	mips0				\n"
			: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
		}
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

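/*
 * set_pte_at() is the cache-maintenance wrapper around set_pte():
 * __update_cache() is called only when a present mapping is installed
 * for a different physical page than the one the old entry pointed at,
 * which is when dcache/icache maintenance may be needed; in all other
 * cases the cache step is skipped and the PTE is simply written.
 */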
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

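/*
 * Note for the MIPSR2/MIPSR6 cases in pte_mkyoung() and pmd_mkyoung()
 * below: _PAGE_SILENT_READ is the hardware valid bit. On cores with
 * RI/XI support, readability is expressed as the absence of
 * _PAGE_NO_READ rather than the presence of the software _PAGE_READ
 * bit, hence the two branches.
 */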
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
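
/*
 * Illustrative use (a sketch, not part of the original file): a driver
 * exposing a framebuffer or MMIO range to user space would typically
 * adjust vma->vm_page_prot with one of the helpers above before
 * remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */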

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

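/*
 * Assumption (helper defined elsewhere, per platform):
 * fixup_bigphys_addr() translates a 32-bit resource address into the
 * full physical address, so a 32-bit kernel with 64-bit phys_addr_t
 * can remap device memory that lives above 4GB.
 */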
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
	if (pmd_val(pmd) & _PAGE_READ)
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
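
/*
 * The two cases above reflect what a pmd may hold here: a normal pmd
 * stores the kernel virtual address of a page table (see
 * pmd_page_vaddr()/pmd_phys() earlier in this file), while a huge pmd
 * stores the mapping itself, i.e. a pfn at _PFN_SHIFT plus attribute
 * bits, so pmd_page() must decode it via pmd_pfn().
 */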

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
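
/*
 * Practical effect (assumption based on the aliasing note above): the
 * arch-specific get_unmapped_area variants colour-align shared and
 * file-backed mappings (typically to SHMLBA) so that two virtual
 * addresses of the same physical page cannot end up in different
 * colours of a virtually indexed cache.
 */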

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */