/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c;
 * the real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
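
/*
 * MIPS keeps several cache-colored copies of the zero page (hence
 * __HAVE_COLOR_ZERO_PAGE, which tells generic mm code that ZERO_PAGE()
 * depends on the virtual address); zero_page_mask picks the copy whose
 * color matches vaddr, so reads through differently-colored user mappings
 * do not alias in the data cache.
 */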

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

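/*
 * htw_stop()/htw_start() disable and re-enable the hardware page table
 * walker (the PWEN bit in the CP0 PWCtl register) around software page
 * table updates.  raw_current_cpu_data.htw_seq is a per-CPU nesting
 * counter: only the outermost stop/start pair actually toggles the
 * walker, and interrupts are disabled while the counter and control
 * register are updated.
 */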
#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

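/*
 * In this configuration (64-bit physical addresses on a 32-bit CPU, e.g.
 * XPA-style systems) a PTE is a pair of 32-bit words: pte_low carries the
 * software status bits while pte_high carries the PFN, cache attribute
 * and global bits.  (A rough sketch of the split; <asm/pgtable-32.h> has
 * the authoritative layout.)
 */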
#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

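/*
 * Write the high word first and the low word (which holds _PAGE_PRESENT)
 * last, with a write barrier in between, so a concurrent walker never
 * sees a present PTE whose high half is still stale.
 */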
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

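/*
 * A TLB entry on most MIPS cores maps an even/odd pair of virtual pages,
 * and the hardware only treats the entry as global when the G bit is set
 * in both EntryLo halves; ptep_buddy() names the other PTE of the pair,
 * and set_pte()/pte_clear() keep _PAGE_GLOBAL consistent across the two.
 * R3000-class TLBs map single pages, which is why they are excluded
 * below.
 */
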
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
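
/*
 * Since the sizes are powers of two, __builtin_ffs(sizeof(x)) - 1 yields
 * log2 of the type size (e.g. a 4-byte pte_t gives PTE_T_LOG2 == 2).
 * The constants serve as shift amounts when the generated TLB refill
 * handlers index the page tables.
 */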

/*
 * We used to declare this array with its size, but gcc 3.3 and older are
 * not able to see that the expression is a constant, so the size is
 * dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#ifdef CONFIG_CPU_MIPSR2
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
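
/*
 * _PAGE_SILENT_READ and _PAGE_SILENT_WRITE are the hardware valid and
 * dirty bits that actually reach the TLB; _PAGE_ACCESSED and
 * _PAGE_MODIFIED are software-only state.  A page only becomes
 * hardware-readable or -writable once software has recorded the access,
 * which is how MIPS emulates referenced/dirty tracking.  On MIPS R2 and
 * later the RI (read-inhibit) bit exists, so readability is expressed by
 * a clear _PAGE_NO_READ rather than implied by _PAGE_READ.
 */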

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
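
/*
 * Both helpers replace only the cache attribute field of a protection
 * value.  A driver would typically apply one to vma->vm_page_prot before
 * remapping device memory, e.g. (illustrative only):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */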

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
	unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
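
/*
 * The io_remap_pfn_range() wrapper above exists because some platforms
 * (e.g. certain Alchemy boards) expose devices through 32-bit alias
 * addresses; fixup_bigphys_addr() translates such an alias into the real
 * (possibly >32-bit) physical address before the mapping is installed.
 */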
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}
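
/*
 * _PAGE_SPLITTING marks a huge pmd that is being split back into normal
 * ptes; lockless walkers such as fast GUP check pmd_trans_splitting()
 * and back off until the split completes.
 */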

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#ifdef CONFIG_CPU_MIPSR2
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
	if (pmd_val(pmd) & _PAGE_READ)
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}
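
/*
 * A plain read-then-clear is sufficient above since callers are expected
 * to hold the page table lock for the pmd being torn down.
 */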

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */